From c2323401c85ab7ec21a12ad491eefdfdf36a22d8 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 21 Jul 2023 04:55:16 +0000
Subject: [PATCH] Bump google.golang.org/grpc from 1.51.0 to 1.53.0

Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.51.0 to 1.53.0.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.51.0...v1.53.0)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: indirect
...

Signed-off-by: dependabot[bot]
---
 go.mod | 25 +-
 go.sum | 654 +---
 .../github.com/apparentlymart/go-cidr/LICENSE | 19 -
 .../apparentlymart/go-cidr/cidr/cidr.go | 236 --
 .../apparentlymart/go-cidr/cidr/wrangling.go | 37 -
 .../apparentlymart/go-versions/LICENSE | 21 -
 .../versions/constraints/canon_style.go | 352 ---
 .../constraints/constraintdepth_string.go | 16 -
 .../go-versions/versions/constraints/doc.go | 13 -
 .../go-versions/versions/constraints/raw.go | 74 -
 .../versions/constraints/raw_scan.go | 623 ----
 .../versions/constraints/raw_scan.rl | 95 -
 .../versions/constraints/ruby_style.go | 181 --
 .../constraints/selectionop_string.go | 43 -
 .../go-versions/versions/constraints/spec.go | 249 --
 .../versions/constraints/version.go | 81 -
 .../go-versions/versions/doc.go | 14 -
 .../go-versions/versions/list.go | 149 -
 .../go-versions/versions/parse.go | 243 --
 .../go-versions/versions/set.go | 89 -
 .../go-versions/versions/set_bound.go | 98 -
 .../go-versions/versions/set_exact.go | 103 -
 .../go-versions/versions/set_extremes.go | 49 -
 .../go-versions/versions/set_finite.go | 34 -
 .../go-versions/versions/set_intersection.go | 132 -
 .../go-versions/versions/set_released.go | 30 -
 .../go-versions/versions/set_subtract.go | 56 -
 .../go-versions/versions/set_union.go | 121 -
 .../go-versions/versions/version.go | 222 --
 vendor/github.com/blang/semver/.travis.yml | 21 -
 vendor/github.com/blang/semver/LICENSE | 22 -
 vendor/github.com/blang/semver/README.md | 194 --
 vendor/github.com/blang/semver/json.go | 23 -
 vendor/github.com/blang/semver/package.json | 17 -
 vendor/github.com/blang/semver/range.go | 416 ---
 vendor/github.com/blang/semver/semver.go | 418 ---
 vendor/github.com/blang/semver/sort.go | 28 -
 vendor/github.com/blang/semver/sql.go | 30 -
 .../github.com/bmatcuk/doublestar/.gitignore | 32 -
 .../github.com/bmatcuk/doublestar/.travis.yml | 15 -
 vendor/github.com/bmatcuk/doublestar/LICENSE | 22 -
 .../github.com/bmatcuk/doublestar/README.md | 109 -
 .../bmatcuk/doublestar/doublestar.go | 476 ---
 vendor/github.com/google/uuid/.travis.yml | 9 -
 vendor/github.com/google/uuid/CONTRIBUTING.md | 10 -
 vendor/github.com/google/uuid/CONTRIBUTORS | 9 -
 vendor/github.com/google/uuid/LICENSE | 27 -
 vendor/github.com/google/uuid/README.md | 19 -
 vendor/github.com/google/uuid/dce.go | 80 -
 vendor/github.com/google/uuid/doc.go | 12 -
 vendor/github.com/google/uuid/hash.go | 53 -
 vendor/github.com/google/uuid/marshal.go | 38 -
 vendor/github.com/google/uuid/node.go | 90 -
 vendor/github.com/google/uuid/node_js.go | 12 -
 vendor/github.com/google/uuid/node_net.go | 33 -
 vendor/github.com/google/uuid/sql.go | 59 -
 vendor/github.com/google/uuid/time.go | 123 -
 vendor/github.com/google/uuid/util.go | 43 -
 vendor/github.com/google/uuid/uuid.go | 245 --
 vendor/github.com/google/uuid/version1.go | 44 -
 vendor/github.com/google/uuid/version4.go | 43 -
 .../github.com/hashicorp/go-cleanhttp/LICENSE | 363 ---
 .../hashicorp/go-cleanhttp/README.md | 30 -
 .../hashicorp/go-cleanhttp/cleanhttp.go | 58 -
 .../github.com/hashicorp/go-cleanhttp/doc.go | 20 -
 .../hashicorp/go-cleanhttp/handlers.go | 48 -
 .../hashicorp/go-retryablehttp/.gitignore | 3 -
 .../hashicorp/go-retryablehttp/.travis.yml | 12 -
 .../hashicorp/go-retryablehttp/LICENSE | 363 ---
 .../hashicorp/go-retryablehttp/Makefile | 11 -
 .../hashicorp/go-retryablehttp/README.md | 46 -
 .../hashicorp/go-retryablehttp/client.go | 528 ----
 vendor/github.com/hashicorp/hcl/.gitignore | 9 -
 vendor/github.com/hashicorp/hcl/.travis.yml | 13 -
 vendor/github.com/hashicorp/hcl/LICENSE | 354 ---
 vendor/github.com/hashicorp/hcl/Makefile | 18 -
 vendor/github.com/hashicorp/hcl/README.md | 125 -
 vendor/github.com/hashicorp/hcl/appveyor.yml | 19 -
 vendor/github.com/hashicorp/hcl/decoder.go | 729 -----
 vendor/github.com/hashicorp/hcl/hcl.go | 11 -
 .../github.com/hashicorp/hcl/hcl/ast/ast.go | 219 --
 .../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 -
 .../hashicorp/hcl/hcl/parser/error.go | 17 -
 .../hashicorp/hcl/hcl/parser/parser.go | 532 ----
 .../hashicorp/hcl/hcl/scanner/scanner.go | 652 ----
 .../hashicorp/hcl/hcl/strconv/quote.go | 241 --
 .../hashicorp/hcl/hcl/token/position.go | 46 -
 .../hashicorp/hcl/hcl/token/token.go | 219 --
 .../hashicorp/hcl/json/parser/flatten.go | 117 -
 .../hashicorp/hcl/json/parser/parser.go | 313 --
 .../hashicorp/hcl/json/scanner/scanner.go | 451 ---
 .../hashicorp/hcl/json/token/position.go | 46 -
 .../hashicorp/hcl/json/token/token.go | 118 -
 vendor/github.com/hashicorp/hcl/lex.go | 38 -
 vendor/github.com/hashicorp/hcl/parse.go | 39 -
 .../hashicorp/hcl/v2/ext/dynblock/README.md | 184 --
 .../hcl/v2/ext/dynblock/expand_body.go | 248 --
 .../hcl/v2/ext/dynblock/expand_spec.go | 215 --
 .../hcl/v2/ext/dynblock/expr_wrap.go | 42 -
 .../hcl/v2/ext/dynblock/iteration.go | 66 -
 .../hashicorp/hcl/v2/ext/dynblock/public.go | 47 -
 .../hashicorp/hcl/v2/ext/dynblock/schema.go | 50 -
 .../hcl/v2/ext/dynblock/unknown_body.go | 89 -
 .../hcl/v2/ext/dynblock/variables.go | 209 --
 .../hcl/v2/ext/dynblock/variables_hcldec.go | 43 -
 .../hashicorp/hcl/v2/ext/tryfunc/README.md | 44 -
 .../hashicorp/hcl/v2/ext/tryfunc/tryfunc.go | 150 -
 .../hashicorp/hcl/v2/ext/typeexpr/README.md | 156 -
 .../hashicorp/hcl/v2/ext/typeexpr/defaults.go | 206 --
 .../hashicorp/hcl/v2/ext/typeexpr/doc.go | 11 -
 .../hashicorp/hcl/v2/ext/typeexpr/get_type.go | 343 ---
 .../hashicorp/hcl/v2/ext/typeexpr/public.go | 139 -
 .../hcl/v2/ext/typeexpr/type_type.go | 118 -
 .../hashicorp/hcl/v2/gohcl/decode.go | 320 --
 .../github.com/hashicorp/hcl/v2/gohcl/doc.go | 62 -
 .../hashicorp/hcl/v2/gohcl/encode.go | 191 --
 .../hashicorp/hcl/v2/gohcl/schema.go | 181 --
 .../hashicorp/hcl/v2/gohcl/types.go | 16 -
 .../hashicorp/hcl/v2/hcldec/block_labels.go | 21 -
 .../hashicorp/hcl/v2/hcldec/decode.go | 36 -
 .../github.com/hashicorp/hcl/v2/hcldec/doc.go | 12 -
 .../github.com/hashicorp/hcl/v2/hcldec/gob.go | 23 -
 .../hashicorp/hcl/v2/hcldec/public.go | 81 -
 .../hashicorp/hcl/v2/hcldec/schema.go | 36 -
 .../hashicorp/hcl/v2/hcldec/spec.go | 1675 ----------
 .../hashicorp/hcl/v2/hcldec/variables.go | 36 -
 .../hashicorp/hcl/v2/hclparse/parser.go | 135 -
 .../hashicorp/hcl/v2/hclwrite/ast.go | 121 -
 .../hcl/v2/hclwrite/ast_attribute.go | 48 -
 .../hashicorp/hcl/v2/hclwrite/ast_block.go | 177 --
 .../hashicorp/hcl/v2/hclwrite/ast_body.go | 239 --
 .../hcl/v2/hclwrite/ast_expression.go | 224 --
 .../hashicorp/hcl/v2/hclwrite/doc.go | 11 -
 .../hashicorp/hcl/v2/hclwrite/format.go | 463 ---
 .../hashicorp/hcl/v2/hclwrite/generate.go | 396 ---
 .../hcl/v2/hclwrite/native_node_sorter.go | 23 -
 .../hashicorp/hcl/v2/hclwrite/node.go | 296 --
 .../hashicorp/hcl/v2/hclwrite/parser.go | 638 ----
 .../hashicorp/hcl/v2/hclwrite/public.go | 44 -
 .../hashicorp/hcl/v2/hclwrite/tokens.go | 132 -
 .../github.com/hashicorp/hcl/v2/json/ast.go | 121 -
 .../hashicorp/hcl/v2/json/didyoumean.go | 33 -
 .../github.com/hashicorp/hcl/v2/json/doc.go | 12 -
 vendor/github.com/hashicorp/hcl/v2/json/is.go | 54 -
 .../hashicorp/hcl/v2/json/navigation.go | 70 -
 .../hashicorp/hcl/v2/json/parser.go | 504 ---
 .../hashicorp/hcl/v2/json/peeker.go | 25 -
 .../hashicorp/hcl/v2/json/public.go | 117 -
 .../hashicorp/hcl/v2/json/scanner.go | 306 --
 .../github.com/hashicorp/hcl/v2/json/spec.md | 405 ---
 .../hashicorp/hcl/v2/json/structure.go | 637 ----
 .../hashicorp/hcl/v2/json/tokentype_string.go | 29 -
 vendor/github.com/hashicorp/hil/.gitignore | 3 -
 vendor/github.com/hashicorp/hil/.travis.yml | 3 -
 vendor/github.com/hashicorp/hil/LICENSE | 353 ---
 vendor/github.com/hashicorp/hil/README.md | 102 -
 vendor/github.com/hashicorp/hil/appveyor.yml | 18 -
 .../hashicorp/hil/ast/arithmetic.go | 43 -
 .../hashicorp/hil/ast/arithmetic_op.go | 24 -
 vendor/github.com/hashicorp/hil/ast/ast.go | 99 -
 vendor/github.com/hashicorp/hil/ast/call.go | 47 -
 .../hashicorp/hil/ast/conditional.go | 36 -
 vendor/github.com/hashicorp/hil/ast/index.go | 76 -
 .../github.com/hashicorp/hil/ast/literal.go | 88 -
 vendor/github.com/hashicorp/hil/ast/output.go | 78 -
 vendor/github.com/hashicorp/hil/ast/scope.go | 90 -
 vendor/github.com/hashicorp/hil/ast/stack.go | 25 -
 .../hashicorp/hil/ast/type_string.go | 54 -
 .../github.com/hashicorp/hil/ast/unknown.go | 30 -
 .../hashicorp/hil/ast/variable_access.go | 36 -
 .../hashicorp/hil/ast/variables_helper.go | 63 -
 vendor/github.com/hashicorp/hil/builtins.go | 331 --
 .../hashicorp/hil/check_identifier.go | 88 -
 .../github.com/hashicorp/hil/check_types.go | 668 ----
 vendor/github.com/hashicorp/hil/convert.go | 174 --
 vendor/github.com/hashicorp/hil/eval.go | 472 ---
 vendor/github.com/hashicorp/hil/eval_type.go | 16 -
 .../hashicorp/hil/evaltype_string.go | 42 -
 vendor/github.com/hashicorp/hil/parse.go | 29 -
 .../hashicorp/hil/parser/binary_op.go | 45 -
 .../github.com/hashicorp/hil/parser/error.go | 38 -
 .../github.com/hashicorp/hil/parser/fuzz.go | 28 -
 .../github.com/hashicorp/hil/parser/parser.go | 522 ----
 .../hashicorp/hil/scanner/peeker.go | 55 -
 .../hashicorp/hil/scanner/scanner.go | 556 ----
 .../github.com/hashicorp/hil/scanner/token.go | 105 -
 .../hashicorp/hil/scanner/tokentype_string.go | 51 -
 .../hashicorp/hil/transform_fixed.go | 29 -
 vendor/github.com/hashicorp/hil/walk.go | 266 --
 .../hashicorp/terraform-svchost/auth/cache.go | 61 -
 .../terraform-svchost/auth/credentials.go | 118 -
 .../terraform-svchost/auth/from_map.go | 48 -
 .../terraform-svchost/auth/helper_program.go | 149 -
 .../terraform-svchost/auth/static.go | 38 -
 .../auth/token_credentials.go | 43 -
 .../terraform-svchost/disco/disco.go | 275 --
 .../hashicorp/terraform-svchost/disco/host.go | 423 ---
 .../terraform-svchost/disco/http_transport.go | 30 -
 .../terraform-svchost/disco/oauth_client.go | 183 --
 vendor/github.com/hashicorp/terraform/LICENSE | 354 ---
 .../hashicorp/terraform/addrs/count_attr.go | 12 -
 .../hashicorp/terraform/addrs/doc.go | 17 -
 .../terraform/addrs/for_each_attr.go | 12 -
 .../terraform/addrs/input_variable.go | 50 -
 .../hashicorp/terraform/addrs/instance_key.go | 135 -
 .../hashicorp/terraform/addrs/local_value.go | 48 -
 .../hashicorp/terraform/addrs/module.go | 140 -
 .../hashicorp/terraform/addrs/module_call.go | 102 -
 .../terraform/addrs/module_instance.go | 459 ---
 .../hashicorp/terraform/addrs/output_value.go | 75 -
 .../hashicorp/terraform/addrs/parse_ref.go | 345 ---
 .../hashicorp/terraform/addrs/parse_target.go | 318 --
 .../hashicorp/terraform/addrs/path_attr.go | 12 -
 .../hashicorp/terraform/addrs/provider.go | 419 ---
 .../terraform/addrs/provider_config.go | 400 ---
 .../terraform/addrs/referenceable.go | 20 -
 .../hashicorp/terraform/addrs/resource.go | 335 --
 .../terraform/addrs/resource_phase.go | 105 -
 .../terraform/addrs/resourcemode_string.go | 33 -
 .../hashicorp/terraform/addrs/self.go | 14 -
 .../hashicorp/terraform/addrs/targetable.go | 26 -
 .../terraform/addrs/terraform_attr.go | 12 -
 .../hashicorp/terraform/config/append.go | 92 -
 .../hashicorp/terraform/config/config.go | 1171 -------
 .../terraform/config/config_string.go | 378 ---
 .../terraform/config/config_terraform.go | 117 -
 .../hashicorp/terraform/config/config_tree.go | 43 -
 .../hashicorp/terraform/config/import_tree.go | 151 -
 .../hashicorp/terraform/config/interpolate.go | 435 ---
 .../terraform/config/interpolate_walk.go | 282 --
 .../hashicorp/terraform/config/loader.go | 212 --
 .../hashicorp/terraform/config/loader_hcl.go | 1270 --------
 .../hashicorp/terraform/config/loader_hcl2.go | 473 ---
 .../hashicorp/terraform/config/merge.go | 204 --
 .../hashicorp/terraform/config/providers.go | 61 -
 .../terraform/config/provisioner_enums.go | 40 -
 .../hashicorp/terraform/config/raw_config.go | 406 ---
 .../terraform/config/resource_mode.go | 9 -
 .../terraform/config/resource_mode_string.go | 24 -
 .../hashicorp/terraform/config/testing.go | 17 -
 .../hashicorp/terraform/configs/backend.go | 55 -
 .../terraform/configs/compat_shim.go | 164 -
 .../hashicorp/terraform/configs/config.go | 348 ---
 .../terraform/configs/config_build.go | 180 --
 .../configs/configschema/coerce_value.go | 250 --
 .../configs/configschema/decoder_spec.go | 123 -
 .../terraform/configs/configschema/doc.go | 14 -
 .../configs/configschema/empty_value.go | 59 -
 .../configs/configschema/implied_type.go | 42 -
 .../configs/configschema/internal_validate.go | 105 -
 .../configschema/nestingmode_string.go | 28 -
 .../configs/configschema/none_required.go | 38 -
 .../terraform/configs/configschema/schema.go | 145 -
 .../configschema/validate_traversal.go | 173 --
 .../hashicorp/terraform/configs/depends_on.go | 23 -
 .../hashicorp/terraform/configs/doc.go | 19 -
 .../terraform/configs/experiments.go | 143 -
 .../terraform/configs/hcl2shim/flatmap.go | 424 ---
 .../terraform/configs/hcl2shim/paths.go | 276 --
 .../configs/hcl2shim/single_attr_body.go | 85 -
 .../terraform/configs/hcl2shim/values.go | 353 ---
 .../configs/hcl2shim/values_equiv.go | 214 --
 .../hashicorp/terraform/configs/module.go | 518 ----
 .../terraform/configs/module_call.go | 173 --
 .../terraform/configs/module_merge.go | 244 --
 .../terraform/configs/module_merge_body.go | 143 -
 .../terraform/configs/named_values.go | 574 ----
 .../hashicorp/terraform/configs/parser.go | 100 -
 .../terraform/configs/parser_config.go | 290 --
 .../terraform/configs/parser_config_dir.go | 163 -
 .../terraform/configs/parser_values.go | 43 -
 .../hashicorp/terraform/configs/provider.go | 244 --
 .../terraform/configs/provider_meta.go | 25 -
 .../configs/provider_requirements.go | 124 -
 .../terraform/configs/provisioner.go | 204 --
 .../configs/provisioneronfailure_string.go | 25 -
 .../configs/provisionerwhen_string.go | 25 -
 .../hashicorp/terraform/configs/resource.go | 517 ----
 .../hashicorp/terraform/configs/synth_body.go | 118 -
 .../hashicorp/terraform/configs/util.go | 63 -
 .../terraform/configs/variable_type_hint.go | 45 -
 .../configs/variabletypehint_string.go | 39 -
 .../terraform/configs/version_constraint.go | 71 -
 .../github.com/hashicorp/terraform/dag/dag.go | 354 ---
 .../github.com/hashicorp/terraform/dag/dot.go | 282 --
 .../hashicorp/terraform/dag/edge.go | 37 -
 .../hashicorp/terraform/dag/graph.go | 329 --
 .../hashicorp/terraform/dag/marshal.go | 232 --
 .../github.com/hashicorp/terraform/dag/set.go | 105 -
 .../hashicorp/terraform/dag/tarjan.go | 107 -
 .../hashicorp/terraform/dag/walk.go | 454 ---
 .../hashicorp/terraform/experiments/doc.go | 9 -
 .../hashicorp/terraform/experiments/errors.go | 26 -
 .../terraform/experiments/experiment.go | 93 -
 .../hashicorp/terraform/experiments/set.go | 46 -
 .../terraform/experiments/testing.go | 33 -
 .../helper/didyoumean/name_suggestion.go | 24 -
 .../helper/hilmapstructure/hilmapstructure.go | 41 -
 .../terraform/helper/logging/indent.go | 23 -
 .../terraform/helper/logging/level.go | 159 -
 .../terraform/helper/logging/logging.go | 109 -
 .../terraform/helper/logging/transport.go | 70 -
 .../hashicorp/terraform/httpclient/client.go | 18 -
 .../terraform/httpclient/useragent.go | 56 -
 .../hashicorp/terraform/instances/expander.go | 362 ---
 .../terraform/instances/expansion_mode.go | 85 -
 .../terraform/instances/instance_key_data.go | 28 -
 .../terraform/internal/getproviders/doc.go | 11 -
 .../terraform/internal/getproviders/errors.go | 204 --
 .../getproviders/filesystem_mirror_source.go | 126 -
 .../getproviders/filesystem_search.go | 258 --
 .../terraform/internal/getproviders/hash.go | 157 -
 .../getproviders/http_mirror_source.go | 42 -
 .../internal/getproviders/legacy_lookup.go | 126 -
 .../internal/getproviders/memoize_source.go | 100 -
 .../internal/getproviders/mock_source.go | 204 --
 .../internal/getproviders/multi_source.go | 251 --
 .../getproviders/package_authentication.go | 355 ---
 .../internal/getproviders/public_keys.go | 89 -
 .../internal/getproviders/registry_client.go | 565 ----
 .../internal/getproviders/registry_source.go | 173 --
 .../terraform/internal/getproviders/source.go | 13 -
 .../terraform/internal/getproviders/types.go | 397 ---
 .../terraform/lang/blocktoattr/doc.go | 5 -
 .../terraform/lang/blocktoattr/fixup.go | 187 --
 .../terraform/lang/blocktoattr/schema.go | 146 -
 .../terraform/lang/blocktoattr/variables.go | 45 -
 .../hashicorp/terraform/lang/data.go | 33 -
 .../hashicorp/terraform/lang/doc.go | 5 -
 .../hashicorp/terraform/lang/eval.go | 365 ---
 .../hashicorp/terraform/lang/funcs/cidr.go | 218 --
 .../terraform/lang/funcs/collection.go | 629 ----
 .../terraform/lang/funcs/conversion.go | 87 -
 .../hashicorp/terraform/lang/funcs/crypto.go | 329 --
 .../terraform/lang/funcs/datetime.go | 70 -
 .../terraform/lang/funcs/encoding.go | 140 -
 .../terraform/lang/funcs/filesystem.go | 453 ---
 .../hashicorp/terraform/lang/funcs/number.go | 169 --
 .../hashicorp/terraform/lang/funcs/string.go | 53 -
 .../hashicorp/terraform/lang/functions.go | 166 -
 .../hashicorp/terraform/lang/references.go | 81 -
 .../hashicorp/terraform/lang/scope.go | 34 -
 .../hashicorp/terraform/plans/action.go | 22 -
 .../terraform/plans/action_string.go | 49 -
 .../hashicorp/terraform/plans/changes.go | 354 ---
 .../hashicorp/terraform/plans/changes_src.go | 190 --
 .../terraform/plans/changes_state.go | 15 -
 .../hashicorp/terraform/plans/changes_sync.go | 184 --
 .../hashicorp/terraform/plans/doc.go | 5 -
 .../terraform/plans/dynamic_value.go | 96 -
 .../terraform/plans/objchange/action.go | 40 -
 .../terraform/plans/objchange/all_null.go | 18 -
 .../terraform/plans/objchange/compatible.go | 447 ---
 .../terraform/plans/objchange/doc.go | 4 -
 .../terraform/plans/objchange/lcs.go | 104 -
 .../plans/objchange/normalize_obj.go | 132 -
 .../terraform/plans/objchange/objchange.go | 390 ---
 .../terraform/plans/objchange/plan_valid.go | 267 --
 .../hashicorp/terraform/plans/plan.go | 92 -
 .../terraform/plugin/discovery/find.go | 191 --
 .../terraform/plugin/discovery/get_cache.go | 48 -
 .../terraform/plugin/discovery/meta.go | 41 -
 .../terraform/plugin/discovery/meta_set.go | 195 --
 .../plugin/discovery/requirements.go | 111 -
 .../terraform/plugin/discovery/version.go | 77 -
 .../terraform/plugin/discovery/version_set.go | 89 -
 .../terraform/providers/addressed_types.go | 33 -
 .../hashicorp/terraform/providers/doc.go | 3 -
 .../hashicorp/terraform/providers/factory.go | 63 -
 .../hashicorp/terraform/providers/provider.go | 386 ---
 .../hashicorp/terraform/provisioners/doc.go | 3 -
 .../terraform/provisioners/factory.go | 19 -
 .../terraform/provisioners/provisioner.go | 82 -
 .../hashicorp/terraform/states/doc.go | 3 -
 .../terraform/states/instance_generation.go | 24 -
 .../terraform/states/instance_object.go | 128 -
 .../terraform/states/instance_object_src.go | 115 -
 .../hashicorp/terraform/states/module.go | 323 --
 .../terraform/states/objectstatus_string.go | 33 -
 .../terraform/states/output_value.go | 16 -
 .../hashicorp/terraform/states/resource.go | 215 --
 .../hashicorp/terraform/states/state.go | 298 --
 .../terraform/states/state_deepcopy.go | 225 --
 .../hashicorp/terraform/states/state_equal.go | 18 -
 .../terraform/states/state_string.go | 277 --
 .../terraform/states/statefile/diagnostics.go | 62 -
 .../terraform/states/statefile/doc.go | 3 -
 .../terraform/states/statefile/file.go | 62 -
 .../states/statefile/marshal_equal.go | 40 -
 .../terraform/states/statefile/read.go | 209 --
 .../terraform/states/statefile/version0.go | 23 -
 .../terraform/states/statefile/version1.go | 174 --
 .../states/statefile/version1_upgrade.go | 172 --
 .../terraform/states/statefile/version2.go | 209 --
 .../states/statefile/version2_upgrade.go | 145 -
 .../terraform/states/statefile/version3.go | 50 -
 .../states/statefile/version3_upgrade.go | 500 ---
 .../terraform/states/statefile/version4.go | 576 ----
 .../terraform/states/statefile/write.go | 17 -
 .../hashicorp/terraform/states/sync.go | 557 ----
 .../hashicorp/terraform/terraform/context.go | 886 ------
 .../terraform/terraform/context_components.go | 65 -
 .../terraform/terraform/context_graph_type.go | 32 -
 .../terraform/terraform/context_import.go | 70 -
 .../terraform/terraform/context_input.go | 190 --
 .../hashicorp/terraform/terraform/diff.go | 1451 ---------
 .../hashicorp/terraform/terraform/eval.go | 62 -
 .../terraform/terraform/eval_apply.go | 705 -----
 .../terraform/eval_check_prevent_destroy.go | 49 -
 .../terraform/terraform/eval_context.go | 168 -
 .../terraform/eval_context_builtin.go | 355 ---
 .../terraform/terraform/eval_context_mock.go | 344 ---
 .../terraform/terraform/eval_count.go | 124 -
 .../terraform/eval_count_boundary.go | 76 -
 .../terraform/terraform/eval_diff.go | 831 -----
 .../terraform/terraform/eval_error.go | 20 -
 .../terraform/terraform/eval_filter.go | 25 -
 .../terraform/eval_filter_operation.go | 49 -
 .../terraform/terraform/eval_for_each.go | 120 -
 .../hashicorp/terraform/terraform/eval_if.go | 26 -
 .../terraform/terraform/eval_import_state.go | 95 -
 .../terraform/terraform/eval_lang.go | 61 -
 .../terraform/terraform/eval_local.go | 74 -
 .../terraform/terraform/eval_noop.go | 8 -
 .../terraform/terraform/eval_output.go | 135 -
 .../terraform/terraform/eval_provider.go | 146 -
 .../terraform/terraform/eval_provisioner.go | 55 -
 .../terraform/terraform/eval_read_data.go | 293 --
 .../terraform/eval_read_data_apply.go | 98 -
 .../terraform/eval_read_data_plan.go | 173 --
 .../terraform/terraform/eval_refresh.go | 137 -
 .../terraform/terraform/eval_sequence.go | 42 -
 .../terraform/terraform/eval_state.go | 585 ----
 .../terraform/terraform/eval_state_upgrade.go | 107 -
 .../terraform/terraform/eval_validate.go | 624 ----
 .../terraform/eval_validate_selfref.go | 67 -
 .../terraform/terraform/eval_variable.go | 245 --
 .../terraform/terraform/evaltree_provider.go | 86 -
 .../hashicorp/terraform/terraform/evaluate.go | 867 ------
 .../terraform/terraform/evaluate_valid.go | 296 --
 .../hashicorp/terraform/terraform/features.go | 7 -
 .../hashicorp/terraform/terraform/graph.go | 107 -
 .../terraform/terraform/graph_builder.go | 77 -
 .../terraform/graph_builder_apply.go | 202 --
 .../terraform/graph_builder_destroy_plan.go | 100 -
 .../terraform/terraform/graph_builder_eval.go | 115 -
 .../terraform/graph_builder_import.go | 100 -
 .../terraform/terraform/graph_builder_plan.go | 217 --
 .../terraform/graph_builder_refresh.go | 200 --
 .../terraform/graph_builder_validate.go | 40 -
 .../terraform/terraform/graph_dot.go | 9 -
 .../terraform/graph_interface_subgraph.go | 17 -
 .../terraform/terraform/graph_walk.go | 34 -
 .../terraform/terraform/graph_walk_context.go | 164 -
 .../terraform/graph_walk_operation.go | 18 -
 .../terraform/terraform/graphtype_string.go | 30 -
 .../hashicorp/terraform/terraform/hook.go | 161 -
 .../terraform/terraform/hook_mock.go | 274 --
 .../terraform/terraform/hook_stop.go | 100 -
 .../terraform/terraform/instance_expanders.go | 7 -
 .../terraform/terraform/instancetype.go | 13 -
 .../terraform/instancetype_string.go | 26 -
 .../terraform/node_count_boundary.go | 22 -
 .../terraform/terraform/node_data_destroy.go | 40 -
 .../terraform/terraform/node_data_refresh.go | 276 --
 .../terraform/terraform/node_local.go | 152 -
 .../terraform/terraform/node_module_expand.go | 289 --
 .../terraform/node_module_variable.go | 225 --
 .../terraform/terraform/node_output.go | 302 --
 .../terraform/terraform/node_provider.go | 11 -
 .../terraform/node_provider_abstract.go | 103 -
 .../terraform/node_provider_disabled.go | 27 -
 .../terraform/terraform/node_provider_eval.go | 18 -
 .../terraform/terraform/node_provisioner.go | 44 -
 .../terraform/node_resource_abstract.go | 439 ---
 .../terraform/node_resource_apply.go | 115 -
 .../terraform/node_resource_apply_instance.go | 444 ---
 .../terraform/node_resource_destroy.go | 284 --
 .../node_resource_destroy_deposed.go | 314 --
 .../terraform/terraform/node_resource_plan.go | 286 --
 .../terraform/node_resource_plan_destroy.go | 88 -
 .../terraform/node_resource_plan_instance.go | 164 -
 .../terraform/node_resource_plan_orphan.go | 84 -
 .../terraform/node_resource_refresh.go | 379 ---
 .../terraform/node_resource_validate.go | 99 -
 .../terraform/terraform/node_root_variable.go | 65 -
 .../terraform/terraform/node_value.go | 10 -
 .../hashicorp/terraform/terraform/plan.go | 122 -
 .../terraform/terraform/provider_mock.go | 522 ----
 .../terraform/terraform/provisioner_mock.go | 153 -
 .../hashicorp/terraform/terraform/resource.go | 551 ----
 .../terraform/terraform/resource_address.go | 618 ----
 .../terraform/terraform/resource_mode.go | 12 -
 .../terraform/resource_mode_string.go | 24 -
 .../terraform/terraform/resource_provider.go | 236 --
 .../terraform/resource_provider_mock.go | 315 --
 .../terraform/resource_provisioner.go | 70 -
 .../terraform/resource_provisioner_mock.go | 87 -
 .../hashicorp/terraform/terraform/schemas.go | 285 --
 .../hashicorp/terraform/terraform/state.go | 2255 --------------
 .../terraform/terraform/state_filter.go | 267 --
 .../terraform/state_upgrade_v1_to_v2.go | 189 --
 .../terraform/state_upgrade_v2_to_v3.go | 142 -
 .../hashicorp/terraform/terraform/state_v1.go | 145 -
 .../hashicorp/terraform/terraform/testing.go | 19 -
 .../terraform/terraform/transform.go | 63 -
 .../transform_attach_config_provider.go | 16 -
 .../transform_attach_config_provider_meta.go | 15 -
 .../transform_attach_config_resource.go | 110 -
 .../terraform/transform_attach_schema.go | 101 -
 .../terraform/transform_attach_state.go | 68 -
 .../terraform/terraform/transform_config.go | 104 -
 .../terraform/transform_count_boundary.go | 33 -
 .../terraform/transform_destroy_cbd.go | 169 --
 .../terraform/transform_destroy_edge.go | 304 --
 .../terraform/terraform/transform_diff.go | 183 --
 .../terraform/terraform/transform_expand.go | 9 -
 .../terraform/transform_import_provider.go | 44 -
 .../terraform/transform_import_state.go | 283 --
 .../terraform/terraform/transform_local.go | 42 -
 .../terraform/transform_module_expansion.go | 141 -
 .../terraform/transform_module_variable.go | 120 -
 .../terraform/transform_orphan_count.go | 55 -
 .../terraform/transform_orphan_output.go | 60 -
 .../terraform/transform_orphan_resource.go | 95 -
 .../terraform/terraform/transform_output.go | 103 -
 .../terraform/terraform/transform_provider.go | 742 -----
 .../terraform/transform_provisioner.go | 179 --
 .../terraform/transform_reference.go | 475 ---
 .../terraform/transform_removed_modules.go | 44 -
 .../terraform/transform_resource_count.go | 36 -
 .../terraform/terraform/transform_root.go | 65 -
 .../terraform/terraform/transform_state.go | 72 -
 .../terraform/terraform/transform_targets.go | 263 --
 .../transform_transitive_reduction.go | 20 -
 .../terraform/terraform/transform_variable.go | 40 -
 .../terraform/terraform/transform_vertex.go | 44 -
 .../hashicorp/terraform/terraform/ui_input.go | 28 -
 .../terraform/terraform/ui_input_mock.go | 25 -
 .../terraform/terraform/ui_input_prefix.go | 20 -
 .../terraform/terraform/ui_output.go | 7 -
 .../terraform/terraform/ui_output_callback.go | 9 -
 .../terraform/terraform/ui_output_mock.go | 21 -
 .../terraform/ui_output_provisioner.go | 19 -
 .../terraform/terraform/user_agent.go | 12 -
 .../hashicorp/terraform/terraform/util.go | 75 -
 .../terraform/valuesourcetype_string.go | 59 -
 .../terraform/terraform/variables.go | 313 --
 .../hashicorp/terraform/terraform/version.go | 10 -
 .../terraform/terraform/version_required.go | 62 -
 .../terraform/walkoperation_string.go | 31 -
 .../terraform/tfdiags/config_traversals.go | 68 -
 .../terraform/tfdiags/consolidate_warnings.go | 146 -
 .../hashicorp/terraform/tfdiags/contextual.go | 372 ---
 .../hashicorp/terraform/tfdiags/diagnostic.go | 40 -
 .../terraform/tfdiags/diagnostic_base.go | 31 -
 .../terraform/tfdiags/diagnostics.go | 330 --
 .../hashicorp/terraform/tfdiags/doc.go | 16 -
 .../hashicorp/terraform/tfdiags/error.go | 28 -
 .../hashicorp/terraform/tfdiags/hcl.go | 141 -
 .../terraform/tfdiags/rpc_friendly.go | 59 -
 .../terraform/tfdiags/severity_string.go | 29 -
 .../terraform/tfdiags/simple_warning.go | 30 -
 .../terraform/tfdiags/source_range.go | 35 -
 .../hashicorp/terraform/tfdiags/sourceless.go | 13 -
 .../hashicorp/terraform/version/version.go | 40 -
 .../github.com/mitchellh/go-homedir/LICENSE | 21 -
 .../github.com/mitchellh/go-homedir/README.md | 14 -
 .../mitchellh/go-homedir/homedir.go | 167 -
 .../mitchellh/hashstructure/LICENSE | 21 -
 .../mitchellh/hashstructure/README.md | 65 -
 .../mitchellh/hashstructure/hashstructure.go | 358 ---
 .../mitchellh/hashstructure/include.go | 15 -
 vendor/github.com/spf13/afero/.gitignore | 2 -
 vendor/github.com/spf13/afero/LICENSE.txt | 174 --
 vendor/github.com/spf13/afero/README.md | 442 ---
 vendor/github.com/spf13/afero/afero.go | 111 -
 vendor/github.com/spf13/afero/appveyor.yml | 10 -
 vendor/github.com/spf13/afero/basepath.go | 222 --
 .../github.com/spf13/afero/cacheOnReadFs.go | 315 --
 vendor/github.com/spf13/afero/const_bsds.go | 23 -
 .../github.com/spf13/afero/const_win_unix.go | 22 -
 .../github.com/spf13/afero/copyOnWriteFs.go | 327 --
 vendor/github.com/spf13/afero/httpFs.go | 114 -
 .../spf13/afero/internal/common/adapters.go | 27 -
 vendor/github.com/spf13/afero/iofs.go | 298 --
 vendor/github.com/spf13/afero/ioutil.go | 243 --
 vendor/github.com/spf13/afero/lstater.go | 27 -
 vendor/github.com/spf13/afero/match.go | 110 -
 vendor/github.com/spf13/afero/mem/dir.go | 37 -
 vendor/github.com/spf13/afero/mem/dirmap.go | 43 -
 vendor/github.com/spf13/afero/mem/file.go | 359 ---
 vendor/github.com/spf13/afero/memmap.go | 422 ---
 vendor/github.com/spf13/afero/os.go | 113 -
 vendor/github.com/spf13/afero/path.go | 106 -
 vendor/github.com/spf13/afero/readonlyfs.go | 96 -
 vendor/github.com/spf13/afero/regexpfs.go | 223 --
 vendor/github.com/spf13/afero/symlink.go | 55 -
 vendor/github.com/spf13/afero/unionFile.go | 330 --
 vendor/github.com/spf13/afero/util.go | 329 --
 .../zclconf/go-cty-yaml/.travis.yml | 5 -
 .../zclconf/go-cty-yaml/CHANGELOG.md | 16 -
 vendor/github.com/zclconf/go-cty-yaml/LICENSE | 201 --
 .../zclconf/go-cty-yaml/LICENSE.libyaml | 31 -
 vendor/github.com/zclconf/go-cty-yaml/NOTICE | 20 -
 vendor/github.com/zclconf/go-cty-yaml/apic.go | 739 -----
 .../zclconf/go-cty-yaml/converter.go | 69 -
 .../zclconf/go-cty-yaml/cty_funcs.go | 57 -
 .../github.com/zclconf/go-cty-yaml/decode.go | 261 --
 .../zclconf/go-cty-yaml/emitterc.go | 1685 -----------
 .../github.com/zclconf/go-cty-yaml/encode.go | 189 --
 .../github.com/zclconf/go-cty-yaml/error.go | 97 -
 .../zclconf/go-cty-yaml/implied_type.go | 268 --
 .../github.com/zclconf/go-cty-yaml/parserc.go | 1095 -------
 .../github.com/zclconf/go-cty-yaml/readerc.go | 412 ---
 .../github.com/zclconf/go-cty-yaml/resolve.go | 293 --
 .../zclconf/go-cty-yaml/scannerc.go | 2696 -----------------
 .../github.com/zclconf/go-cty-yaml/writerc.go | 26 -
 vendor/github.com/zclconf/go-cty-yaml/yaml.go | 215 --
 .../github.com/zclconf/go-cty-yaml/yamlh.go | 738 -----
 .../zclconf/go-cty-yaml/yamlprivateh.go | 173 --
 .../zclconf/go-cty/cty/msgpack/doc.go | 14 -
 .../zclconf/go-cty/cty/msgpack/dynamic.go | 31 -
 .../zclconf/go-cty/cty/msgpack/infinity.go | 8 -
 .../zclconf/go-cty/cty/msgpack/marshal.go | 212 --
 .../go-cty/cty/msgpack/type_implied.go | 167 -
 .../zclconf/go-cty/cty/msgpack/unknown.go | 16 -
 .../zclconf/go-cty/cty/msgpack/unmarshal.go | 334 --
 vendor/golang.org/x/crypto/LICENSE | 27 -
 vendor/golang.org/x/crypto/PATENTS | 22 -
 vendor/golang.org/x/crypto/bcrypt/base64.go | 35 -
 vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 304 --
 vendor/golang.org/x/crypto/blowfish/block.go | 159 -
 vendor/golang.org/x/crypto/blowfish/cipher.go | 99 -
 vendor/golang.org/x/crypto/blowfish/const.go | 199 --
 vendor/golang.org/x/crypto/cast5/cast5.go | 536 ----
 .../x/crypto/chacha20/chacha_arm64.go | 17 -
 .../x/crypto/chacha20/chacha_arm64.s | 308 --
 .../x/crypto/chacha20/chacha_generic.go | 398 ---
 .../x/crypto/chacha20/chacha_noasm.go | 14 -
 .../x/crypto/chacha20/chacha_ppc64le.go | 17 -
 .../x/crypto/chacha20/chacha_ppc64le.s | 450 ---
 .../x/crypto/chacha20/chacha_s390x.go | 28 -
 .../x/crypto/chacha20/chacha_s390x.s | 225 --
 vendor/golang.org/x/crypto/chacha20/xor.go | 42 -
 .../x/crypto/curve25519/curve25519.go | 146 -
 .../x/crypto/curve25519/internal/field/README | 7 -
 .../x/crypto/curve25519/internal/field/fe.go | 416 ---
 .../curve25519/internal/field/fe_amd64.go | 16 -
 .../curve25519/internal/field/fe_amd64.s | 379 ---
 .../internal/field/fe_amd64_noasm.go | 12 -
 .../curve25519/internal/field/fe_arm64.go | 16 -
 .../curve25519/internal/field/fe_arm64.s | 43 -
 .../internal/field/fe_arm64_noasm.go | 12 -
 .../curve25519/internal/field/fe_generic.go | 264 --
 .../curve25519/internal/field/sync.checkpoint | 1 -
 .../crypto/curve25519/internal/field/sync.sh | 19 -
 vendor/golang.org/x/crypto/ed25519/ed25519.go | 71 -
 .../x/crypto/internal/alias/alias.go | 32 -
 .../x/crypto/internal/alias/alias_purego.go | 35 -
 .../x/crypto/internal/poly1305/bits_compat.go | 40 -
 .../x/crypto/internal/poly1305/bits_go1.13.go | 22 -
 .../x/crypto/internal/poly1305/mac_noasm.go | 10 -
 .../x/crypto/internal/poly1305/poly1305.go | 99 -
 .../x/crypto/internal/poly1305/sum_amd64.go | 48 -
 .../x/crypto/internal/poly1305/sum_amd64.s | 109 -
 .../x/crypto/internal/poly1305/sum_generic.go | 309 --
 .../x/crypto/internal/poly1305/sum_ppc64le.go | 48 -
 .../x/crypto/internal/poly1305/sum_ppc64le.s | 182 --
 .../x/crypto/internal/poly1305/sum_s390x.go | 77 -
 .../x/crypto/internal/poly1305/sum_s390x.s | 504 ---
 .../x/crypto/openpgp/armor/armor.go | 232 --
 .../x/crypto/openpgp/armor/encode.go | 161 -
 .../x/crypto/openpgp/canonical_text.go | 59 -
 .../x/crypto/openpgp/elgamal/elgamal.go | 130 -
 .../x/crypto/openpgp/errors/errors.go | 78 -
 vendor/golang.org/x/crypto/openpgp/keys.go | 693 -----
 .../x/crypto/openpgp/packet/compressed.go | 123 -
 .../x/crypto/openpgp/packet/config.go | 91 -
 .../x/crypto/openpgp/packet/encrypted_key.go | 208 --
 .../x/crypto/openpgp/packet/literal.go | 89 -
 .../x/crypto/openpgp/packet/ocfb.go | 143 -
 .../openpgp/packet/one_pass_signature.go | 73 -
 .../x/crypto/openpgp/packet/opaque.go | 161 -
 .../x/crypto/openpgp/packet/packet.go | 590 ----
 .../x/crypto/openpgp/packet/private_key.go | 384 ---
 .../x/crypto/openpgp/packet/public_key.go | 753 -----
 .../x/crypto/openpgp/packet/public_key_v3.go | 279 --
 .../x/crypto/openpgp/packet/reader.go | 76 -
 .../x/crypto/openpgp/packet/signature.go | 731 -----
 .../x/crypto/openpgp/packet/signature_v3.go | 146 -
 .../openpgp/packet/symmetric_key_encrypted.go | 155 -
 .../openpgp/packet/symmetrically_encrypted.go | 290 --
 .../x/crypto/openpgp/packet/userattribute.go | 90 -
 .../x/crypto/openpgp/packet/userid.go | 159 -
 vendor/golang.org/x/crypto/openpgp/read.go | 448 ---
 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 279 --
 vendor/golang.org/x/crypto/openpgp/write.go | 418 ---
 vendor/golang.org/x/crypto/ssh/buffer.go | 97 -
 vendor/golang.org/x/crypto/ssh/certs.go | 589 ----
 vendor/golang.org/x/crypto/ssh/channel.go | 633 ----
 vendor/golang.org/x/crypto/ssh/cipher.go | 788 -----
 vendor/golang.org/x/crypto/ssh/client.go | 282 --
 vendor/golang.org/x/crypto/ssh/client_auth.go | 725 -----
 vendor/golang.org/x/crypto/ssh/common.go | 445 ---
 vendor/golang.org/x/crypto/ssh/connection.go | 143 -
 vendor/golang.org/x/crypto/ssh/doc.go | 22 -
 vendor/golang.org/x/crypto/ssh/handshake.go | 735 -----
 .../ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go | 93 -
 vendor/golang.org/x/crypto/ssh/kex.go | 774 -----
 vendor/golang.org/x/crypto/ssh/keys.go | 1447 ---------
 vendor/golang.org/x/crypto/ssh/mac.go | 61 -
 vendor/golang.org/x/crypto/ssh/messages.go | 877 ------
 vendor/golang.org/x/crypto/ssh/mux.go | 351 ---
 vendor/golang.org/x/crypto/ssh/server.go | 755 -----
 vendor/golang.org/x/crypto/ssh/session.go | 647 ----
 vendor/golang.org/x/crypto/ssh/ssh_gss.go | 139 -
 vendor/golang.org/x/crypto/ssh/streamlocal.go | 116 -
 vendor/golang.org/x/crypto/ssh/tcpip.go | 474 ---
 vendor/golang.org/x/crypto/ssh/transport.go | 357 ---
 vendor/golang.org/x/mod/LICENSE | 27 -
 vendor/golang.org/x/mod/PATENTS | 22 -
 vendor/golang.org/x/mod/sumdb/dirhash/hash.go | 135 -
 vendor/golang.org/x/oauth2/AUTHORS | 3 -
 vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 -
 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 18 -
 vendor/golang.org/x/sys/cpu/byteorder.go | 66 -
 vendor/golang.org/x/sys/cpu/cpu.go | 287 --
 vendor/golang.org/x/sys/cpu/cpu_aix.go | 34 -
 vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 -
 vendor/golang.org/x/sys/cpu/cpu_arm64.go | 172 --
 vendor/golang.org/x/sys/cpu/cpu_arm64.s | 32 -
 vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 12 -
 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 22 -
 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 17 -
 .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 12 -
 .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 23 -
 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 39 -
 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 33 -
 vendor/golang.org/x/sys/cpu/cpu_linux.go | 16 -
 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go | 39 -
 .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 111 -
 .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 24 -
 .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 10 -
 .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 32 -
 .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 -
 vendor/golang.org/x/sys/cpu/cpu_loong64.go | 13 -
 vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 16 -
 vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 12 -
 .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 --
 .../golang.org/x/sys/cpu/cpu_openbsd_arm64.go | 65 -
 .../golang.org/x/sys/cpu/cpu_openbsd_arm64.s | 11 -
 vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 10 -
 .../golang.org/x/sys/cpu/cpu_other_arm64.go | 10 -
 .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 13 -
 .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 15 -
 .../golang.org/x/sys/cpu/cpu_other_riscv64.go | 12 -
 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 17 -
 vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 12 -
 vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 --
 vendor/golang.org/x/sys/cpu/cpu_s390x.s | 58 -
 vendor/golang.org/x/sys/cpu/cpu_wasm.go | 18 -
 vendor/golang.org/x/sys/cpu/cpu_x86.go | 145 -
 vendor/golang.org/x/sys/cpu/cpu_x86.s | 28 -
 vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 -
 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 -
 vendor/golang.org/x/sys/cpu/endian_big.go | 11 -
 vendor/golang.org/x/sys/cpu/endian_little.go | 11 -
 vendor/golang.org/x/sys/cpu/hwcap_linux.go | 56 -
 vendor/golang.org/x/sys/cpu/parse.go | 43 -
 .../x/sys/cpu/proc_cpuinfo_linux.go | 54 -
 .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 27 -
 .../x/sys/cpu/syscall_aix_ppc64_gc.go | 36 -
 vendor/golang.org/x/text/runes/cond.go | 187 --
 vendor/golang.org/x/text/runes/runes.go | 355 ---
 .../googleapis/rpc/status/status.pb.go | 10 +-
 .../grpc/balancer/balancer.go | 8 +
 .../grpc_binarylog_v1/binarylog.pb.go | 7 +-
 vendor/google.golang.org/grpc/clientconn.go | 44 +-
 .../google.golang.org/grpc/credentials/tls.go | 4 +-
 vendor/google.golang.org/grpc/dialoptions.go | 25 +-
 .../grpc/encoding/encoding.go | 4 +-
 .../grpc/grpclog/loggerv2.go | 7 +-
 .../grpc/health/grpc_health_v1/health.pb.go | 7 +-
 .../grpc/internal/binarylog/method_logger.go | 128 +-
 .../grpc/internal/binarylog/sink.go | 12 +-
 .../grpc/internal/envconfig/envconfig.go | 39 +-
 .../grpc/internal/envconfig/xds.go | 31 +-
 .../grpc/internal/internal.go | 3 +
 .../internal/resolver/dns/dns_resolver.go | 6 +-
 .../resolver/passthrough/passthrough.go | 11 +-
 .../grpc/internal/resolver/unix/unix.go | 4 +-
 .../grpc/internal/transport/controlbuf.go | 62 +-
 .../grpc/internal/transport/defaults.go | 6 +
 .../grpc/internal/transport/handler_server.go | 45 +-
 .../grpc/internal/transport/http2_client.go | 98 +-
 .../grpc/internal/transport/http2_server.go | 142 +-
 .../grpc/internal/transport/transport.go | 6 +-
 .../google.golang.org/grpc/picker_wrapper.go | 28 +-
 vendor/google.golang.org/grpc/pickfirst.go | 6 +-
 .../grpc_reflection_v1alpha/reflection.pb.go | 360 +--
 .../grpc_reflection_v1alpha/reflection.proto | 138 -
 .../reflection_grpc.pb.go | 10 +-
 .../grpc/reflection/serverreflection.go | 66 +-
 vendor/google.golang.org/grpc/regenerate.sh | 7 +-
 .../grpc/resolver/resolver.go | 22 +-
 vendor/google.golang.org/grpc/rpc_util.go | 17 +-
 vendor/google.golang.org/grpc/server.go | 79 +-
 .../google.golang.org/grpc/service_config.go | 10 +-
 vendor/google.golang.org/grpc/stream.go | 42 +-
 vendor/google.golang.org/grpc/version.go | 2 +-
 vendor/google.golang.org/grpc/vet.sh | 23 +-
 vendor/modules.txt | 128 +-
 813 files changed, 787 insertions(+), 133372 deletions(-)
 delete mode 100644 vendor/github.com/apparentlymart/go-cidr/LICENSE
 delete mode 100644 vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
 delete mode 100644 vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/LICENSE
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/constraints/canon_style.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/constraints/constraintdepth_string.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/constraints/doc.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/constraints/raw.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/constraints/raw_scan.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/constraints/raw_scan.rl
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/constraints/ruby_style.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/constraints/selectionop_string.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/constraints/spec.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/constraints/version.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/doc.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/list.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/parse.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/set.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/set_bound.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/set_exact.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/set_extremes.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/set_finite.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/set_intersection.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/set_released.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/set_subtract.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/set_union.go
 delete mode 100644 vendor/github.com/apparentlymart/go-versions/versions/version.go
 delete mode 100644 vendor/github.com/blang/semver/.travis.yml
 delete mode 100644 vendor/github.com/blang/semver/LICENSE
 delete mode 100644 vendor/github.com/blang/semver/README.md
 delete mode 100644 vendor/github.com/blang/semver/json.go
 delete mode 100644 vendor/github.com/blang/semver/package.json
 delete mode 100644 vendor/github.com/blang/semver/range.go
 delete mode 100644 vendor/github.com/blang/semver/semver.go
 delete mode 100644 vendor/github.com/blang/semver/sort.go
 delete mode 100644 vendor/github.com/blang/semver/sql.go
 delete mode 100644 vendor/github.com/bmatcuk/doublestar/.gitignore
 delete mode 100644 vendor/github.com/bmatcuk/doublestar/.travis.yml
 delete mode 100644 vendor/github.com/bmatcuk/doublestar/LICENSE
 delete mode 100644 vendor/github.com/bmatcuk/doublestar/README.md
 delete mode 100644 vendor/github.com/bmatcuk/doublestar/doublestar.go
 delete mode 100644 vendor/github.com/google/uuid/.travis.yml
 delete mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md
 delete mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS
 delete mode 100644 vendor/github.com/google/uuid/LICENSE
 delete mode 100644 vendor/github.com/google/uuid/README.md
 delete mode 100644 vendor/github.com/google/uuid/dce.go
 delete mode 100644 vendor/github.com/google/uuid/doc.go
 delete mode 100644 vendor/github.com/google/uuid/hash.go
 delete mode 100644 vendor/github.com/google/uuid/marshal.go
 delete mode 100644 vendor/github.com/google/uuid/node.go
 delete mode 100644 vendor/github.com/google/uuid/node_js.go
 delete mode 100644 vendor/github.com/google/uuid/node_net.go
 delete mode 100644 vendor/github.com/google/uuid/sql.go
 delete mode 100644 vendor/github.com/google/uuid/time.go
 delete mode 100644 vendor/github.com/google/uuid/util.go
 delete mode 100644 vendor/github.com/google/uuid/uuid.go
 delete mode 100644 vendor/github.com/google/uuid/version1.go
 delete mode 100644 vendor/github.com/google/uuid/version4.go
 delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/LICENSE
 delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/README.md
 delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
 delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/doc.go
 delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/handlers.go
 delete mode 100644 vendor/github.com/hashicorp/go-retryablehttp/.gitignore
 delete mode 100644 vendor/github.com/hashicorp/go-retryablehttp/.travis.yml
 delete mode 100644 vendor/github.com/hashicorp/go-retryablehttp/LICENSE
 delete mode 100644 vendor/github.com/hashicorp/go-retryablehttp/Makefile
 delete mode 100644 vendor/github.com/hashicorp/go-retryablehttp/README.md
 delete mode 100644 vendor/github.com/hashicorp/go-retryablehttp/client.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/.gitignore
 delete mode 100644 vendor/github.com/hashicorp/hcl/.travis.yml
 delete mode 100644 vendor/github.com/hashicorp/hcl/LICENSE
 delete mode 100644 vendor/github.com/hashicorp/hcl/Makefile
 delete mode 100644 vendor/github.com/hashicorp/hcl/README.md
 delete mode 100644 vendor/github.com/hashicorp/hcl/appveyor.yml
 delete mode 100644 vendor/github.com/hashicorp/hcl/decoder.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/hcl.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/position.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/json/parser/flatten.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/json/parser/parser.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/json/token/position.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/json/token/token.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/lex.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/parse.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/README.md
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/tryfunc.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/defaults.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/type_type.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/types.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/public.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/ast.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_attribute.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_body.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_expression.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/doc.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/ast.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/doc.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/is.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/navigation.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/parser.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/peeker.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/public.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/scanner.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/spec.md
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/structure.go
 delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go
 delete mode 100644 vendor/github.com/hashicorp/hil/.gitignore
 delete mode 100644 vendor/github.com/hashicorp/hil/.travis.yml
 delete mode 100644 vendor/github.com/hashicorp/hil/LICENSE
 delete mode 100644 vendor/github.com/hashicorp/hil/README.md
 delete mode 100644 vendor/github.com/hashicorp/hil/appveyor.yml
 delete mode 100644 vendor/github.com/hashicorp/hil/ast/arithmetic.go
 delete mode 100644 vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
 delete mode 100644 vendor/github.com/hashicorp/hil/ast/ast.go
 delete mode 100644 vendor/github.com/hashicorp/hil/ast/call.go
 delete mode 100644 vendor/github.com/hashicorp/hil/ast/conditional.go
 delete mode 100644 vendor/github.com/hashicorp/hil/ast/index.go
 delete mode 100644 vendor/github.com/hashicorp/hil/ast/literal.go
 delete mode 100644 vendor/github.com/hashicorp/hil/ast/output.go
 delete mode 100644 vendor/github.com/hashicorp/hil/ast/scope.go
 delete mode 100644 vendor/github.com/hashicorp/hil/ast/stack.go
 delete mode 100644 vendor/github.com/hashicorp/hil/ast/type_string.go
 delete mode 100644 vendor/github.com/hashicorp/hil/ast/unknown.go
 delete mode 100644 vendor/github.com/hashicorp/hil/ast/variable_access.go
 delete mode 100644 vendor/github.com/hashicorp/hil/ast/variables_helper.go
 delete mode 100644 vendor/github.com/hashicorp/hil/builtins.go
 delete mode 100644 vendor/github.com/hashicorp/hil/check_identifier.go
 delete mode 100644 vendor/github.com/hashicorp/hil/check_types.go
 delete mode 100644 vendor/github.com/hashicorp/hil/convert.go
 delete mode 100644 vendor/github.com/hashicorp/hil/eval.go
 delete mode 100644 vendor/github.com/hashicorp/hil/eval_type.go
 delete mode 100644 vendor/github.com/hashicorp/hil/evaltype_string.go
 delete mode 100644 vendor/github.com/hashicorp/hil/parse.go
 delete mode 100644 vendor/github.com/hashicorp/hil/parser/binary_op.go
 delete mode 100644 vendor/github.com/hashicorp/hil/parser/error.go
 delete mode 100644 vendor/github.com/hashicorp/hil/parser/fuzz.go
 delete mode 100644 vendor/github.com/hashicorp/hil/parser/parser.go
 delete mode 100644 vendor/github.com/hashicorp/hil/scanner/peeker.go
 delete mode 100644 vendor/github.com/hashicorp/hil/scanner/scanner.go
 delete mode 100644 vendor/github.com/hashicorp/hil/scanner/token.go
 delete mode 100644 vendor/github.com/hashicorp/hil/scanner/tokentype_string.go
 delete mode 100644 vendor/github.com/hashicorp/hil/transform_fixed.go
 delete mode 100644 vendor/github.com/hashicorp/hil/walk.go
 delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/auth/cache.go
 delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go
 delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go
 delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go
 delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/auth/static.go
 delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go
 delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/disco/disco.go
 delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/disco/host.go
 delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go
 delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/LICENSE
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/count_attr.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/doc.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/for_each_attr.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/input_variable.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/instance_key.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/local_value.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/module.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/module_call.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/module_instance.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/output_value.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/parse_ref.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/parse_target.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/path_attr.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/provider.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/provider_config.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/referenceable.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/resource.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/resource_phase.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/self.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/targetable.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/append.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/config.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/config_string.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/config_terraform.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/config_tree.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/import_tree.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/interpolate.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/loader.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/loader_hcl.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/loader_hcl2.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/merge.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/providers.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/provisioner_enums.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/raw_config.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/resource_mode.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/config/testing.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/backend.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/compat_shim.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/config.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/config_build.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/doc.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/schema.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/depends_on.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/doc.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/experiments.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/hcl2shim/flatmap.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/hcl2shim/paths.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/hcl2shim/single_attr_body.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/hcl2shim/values.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/hcl2shim/values_equiv.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/module.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/module_call.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/module_merge.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/module_merge_body.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/named_values.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/parser.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/parser_config.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/parser_values.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/provider.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/provider_meta.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/provider_requirements.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/provisioner.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/resource.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/synth_body.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/util.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/configs/version_constraint.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/dag/dag.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/dag/dot.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/dag/edge.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/dag/graph.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/dag/marshal.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/dag/set.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/dag/tarjan.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/dag/walk.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/experiments/doc.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/experiments/errors.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/experiments/experiment.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/experiments/set.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/experiments/testing.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/helper/logging/indent.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/helper/logging/level.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/helper/logging/logging.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/helper/logging/transport.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/httpclient/client.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/httpclient/useragent.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/instances/expander.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/instances/expansion_mode.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/instances/instance_key_data.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/internal/getproviders/doc.go
 delete mode 100644
vendor/github.com/hashicorp/terraform/internal/getproviders/errors.go delete mode 100644 vendor/github.com/hashicorp/terraform/internal/getproviders/filesystem_mirror_source.go delete mode 100644 vendor/github.com/hashicorp/terraform/internal/getproviders/filesystem_search.go delete mode 100644 vendor/github.com/hashicorp/terraform/internal/getproviders/hash.go delete mode 100644 vendor/github.com/hashicorp/terraform/internal/getproviders/http_mirror_source.go delete mode 100644 vendor/github.com/hashicorp/terraform/internal/getproviders/legacy_lookup.go delete mode 100644 vendor/github.com/hashicorp/terraform/internal/getproviders/memoize_source.go delete mode 100644 vendor/github.com/hashicorp/terraform/internal/getproviders/mock_source.go delete mode 100644 vendor/github.com/hashicorp/terraform/internal/getproviders/multi_source.go delete mode 100644 vendor/github.com/hashicorp/terraform/internal/getproviders/package_authentication.go delete mode 100644 vendor/github.com/hashicorp/terraform/internal/getproviders/public_keys.go delete mode 100644 vendor/github.com/hashicorp/terraform/internal/getproviders/registry_client.go delete mode 100644 vendor/github.com/hashicorp/terraform/internal/getproviders/registry_source.go delete mode 100644 vendor/github.com/hashicorp/terraform/internal/getproviders/source.go delete mode 100644 vendor/github.com/hashicorp/terraform/internal/getproviders/types.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/data.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/doc.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/eval.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/collection.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/number.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/funcs/string.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/functions.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/references.go delete mode 100644 vendor/github.com/hashicorp/terraform/lang/scope.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/action.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/action_string.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/changes.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/changes_src.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/changes_state.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/changes_sync.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/doc.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/dynamic_value.go delete mode 100644 
vendor/github.com/hashicorp/terraform/plans/objchange/action.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/objchange/doc.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go delete mode 100644 vendor/github.com/hashicorp/terraform/plans/plan.go delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/find.go delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/version.go delete mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go delete mode 100644 vendor/github.com/hashicorp/terraform/providers/addressed_types.go delete mode 100644 vendor/github.com/hashicorp/terraform/providers/doc.go delete mode 100644 vendor/github.com/hashicorp/terraform/providers/factory.go delete mode 100644 vendor/github.com/hashicorp/terraform/providers/provider.go delete mode 100644 vendor/github.com/hashicorp/terraform/provisioners/doc.go delete mode 100644 vendor/github.com/hashicorp/terraform/provisioners/factory.go delete mode 100644 vendor/github.com/hashicorp/terraform/provisioners/provisioner.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/doc.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/instance_generation.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/instance_object.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/instance_object_src.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/module.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/objectstatus_string.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/output_value.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/resource.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/state.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/state_deepcopy.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/state_equal.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/state_string.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/doc.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/file.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/read.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/version0.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/version1.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go delete mode 
100644 vendor/github.com/hashicorp/terraform/states/statefile/version2.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/version3.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/version4.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/statefile/write.go delete mode 100644 vendor/github.com/hashicorp/terraform/states/sync.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/context.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/context_components.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/context_import.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/context_input.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/diff.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_apply.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_context.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_count.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_diff.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_error.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_filter.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_for_each.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_if.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_lang.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_local.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_noop.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_output.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_provider.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_read_data_apply.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_read_data_plan.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_state.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_validate.go delete mode 
100644 vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_variable.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/evaluate.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/features.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_dot.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_walk.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/hook.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/hook_mock.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/hook_stop.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/instance_expanders.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/instancetype.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_local.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_module_expand.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_output.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_provider.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go 
delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_value.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/plan.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/provider_mock.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_address.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_mode.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_mode_string.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_provider.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/schemas.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/state.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/state_filter.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/state_v1.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/testing.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider_meta.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_config.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go delete mode 100644 
vendor/github.com/hashicorp/terraform/terraform/transform_diff.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_expand.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_local.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_module_expansion.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_output.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_provider.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_reference.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_root.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_state.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_targets.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_variable.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_input.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_output.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/user_agent.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/util.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/variables.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/version.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/version_required.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go delete mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go delete mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/consolidate_warnings.go delete mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/contextual.go delete mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go delete mode 100644 
vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go delete mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go delete mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/doc.go delete mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/error.go delete mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/hcl.go delete mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go delete mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go delete mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go delete mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/source_range.go delete mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go delete mode 100644 vendor/github.com/hashicorp/terraform/version/version.go delete mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE delete mode 100644 vendor/github.com/mitchellh/go-homedir/README.md delete mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go delete mode 100644 vendor/github.com/mitchellh/hashstructure/LICENSE delete mode 100644 vendor/github.com/mitchellh/hashstructure/README.md delete mode 100644 vendor/github.com/mitchellh/hashstructure/hashstructure.go delete mode 100644 vendor/github.com/mitchellh/hashstructure/include.go delete mode 100644 vendor/github.com/spf13/afero/.gitignore delete mode 100644 vendor/github.com/spf13/afero/LICENSE.txt delete mode 100644 vendor/github.com/spf13/afero/README.md delete mode 100644 vendor/github.com/spf13/afero/afero.go delete mode 100644 vendor/github.com/spf13/afero/appveyor.yml delete mode 100644 vendor/github.com/spf13/afero/basepath.go delete mode 100644 vendor/github.com/spf13/afero/cacheOnReadFs.go delete mode 100644 vendor/github.com/spf13/afero/const_bsds.go delete mode 100644 vendor/github.com/spf13/afero/const_win_unix.go delete mode 100644 vendor/github.com/spf13/afero/copyOnWriteFs.go delete mode 100644 vendor/github.com/spf13/afero/httpFs.go delete mode 100644 vendor/github.com/spf13/afero/internal/common/adapters.go delete mode 100644 vendor/github.com/spf13/afero/iofs.go delete mode 100644 vendor/github.com/spf13/afero/ioutil.go delete mode 100644 vendor/github.com/spf13/afero/lstater.go delete mode 100644 vendor/github.com/spf13/afero/match.go delete mode 100644 vendor/github.com/spf13/afero/mem/dir.go delete mode 100644 vendor/github.com/spf13/afero/mem/dirmap.go delete mode 100644 vendor/github.com/spf13/afero/mem/file.go delete mode 100644 vendor/github.com/spf13/afero/memmap.go delete mode 100644 vendor/github.com/spf13/afero/os.go delete mode 100644 vendor/github.com/spf13/afero/path.go delete mode 100644 vendor/github.com/spf13/afero/readonlyfs.go delete mode 100644 vendor/github.com/spf13/afero/regexpfs.go delete mode 100644 vendor/github.com/spf13/afero/symlink.go delete mode 100644 vendor/github.com/spf13/afero/unionFile.go delete mode 100644 vendor/github.com/spf13/afero/util.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/.travis.yml delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/LICENSE delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/NOTICE delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/apic.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/converter.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go delete mode 
100644 vendor/github.com/zclconf/go-cty-yaml/decode.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/emitterc.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/encode.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/error.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/implied_type.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/parserc.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/readerc.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/resolve.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/scannerc.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/writerc.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/yaml.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/yamlh.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go delete mode 100644 vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go delete mode 100644 vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go delete mode 100644 vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go delete mode 100644 vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go delete mode 100644 vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go delete mode 100644 vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go delete mode 100644 vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go delete mode 100644 vendor/golang.org/x/crypto/LICENSE delete mode 100644 vendor/golang.org/x/crypto/PATENTS delete mode 100644 vendor/golang.org/x/crypto/bcrypt/base64.go delete mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt.go delete mode 100644 vendor/golang.org/x/crypto/blowfish/block.go delete mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go delete mode 100644 vendor/golang.org/x/crypto/blowfish/const.go delete mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_arm64.go delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_arm64.s delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_generic.go delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_noasm.go delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_s390x.go delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_s390x.s delete mode 100644 vendor/golang.org/x/crypto/chacha20/xor.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/README delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh delete mode 100644 
vendor/golang.org/x/crypto/ed25519/ed25519.go delete mode 100644 vendor/golang.org/x/crypto/internal/alias/alias.go delete mode 100644 vendor/golang.org/x/crypto/internal/alias/alias_purego.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/poly1305.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/read.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/write.go delete mode 100644 vendor/golang.org/x/crypto/ssh/buffer.go delete mode 100644 vendor/golang.org/x/crypto/ssh/certs.go delete mode 100644 vendor/golang.org/x/crypto/ssh/channel.go delete mode 100644 vendor/golang.org/x/crypto/ssh/cipher.go delete mode 100644 vendor/golang.org/x/crypto/ssh/client.go delete mode 100644 vendor/golang.org/x/crypto/ssh/client_auth.go delete mode 100644 vendor/golang.org/x/crypto/ssh/common.go delete mode 100644 vendor/golang.org/x/crypto/ssh/connection.go delete mode 100644 
vendor/golang.org/x/crypto/ssh/doc.go delete mode 100644 vendor/golang.org/x/crypto/ssh/handshake.go delete mode 100644 vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go delete mode 100644 vendor/golang.org/x/crypto/ssh/kex.go delete mode 100644 vendor/golang.org/x/crypto/ssh/keys.go delete mode 100644 vendor/golang.org/x/crypto/ssh/mac.go delete mode 100644 vendor/golang.org/x/crypto/ssh/messages.go delete mode 100644 vendor/golang.org/x/crypto/ssh/mux.go delete mode 100644 vendor/golang.org/x/crypto/ssh/server.go delete mode 100644 vendor/golang.org/x/crypto/ssh/session.go delete mode 100644 vendor/golang.org/x/crypto/ssh/ssh_gss.go delete mode 100644 vendor/golang.org/x/crypto/ssh/streamlocal.go delete mode 100644 vendor/golang.org/x/crypto/ssh/tcpip.go delete mode 100644 vendor/golang.org/x/crypto/ssh/transport.go delete mode 100644 vendor/golang.org/x/mod/LICENSE delete mode 100644 vendor/golang.org/x/mod/PATENTS delete mode 100644 vendor/golang.org/x/mod/sumdb/dirhash/hash.go delete mode 100644 vendor/golang.org/x/oauth2/AUTHORS delete mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s delete mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_aix.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.s delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_loong64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_riscv64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.s delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_wasm.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go delete mode 100644 
vendor/golang.org/x/sys/cpu/cpu_x86.s delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/cpu/endian_big.go delete mode 100644 vendor/golang.org/x/sys/cpu/endian_little.go delete mode 100644 vendor/golang.org/x/sys/cpu/hwcap_linux.go delete mode 100644 vendor/golang.org/x/sys/cpu/parse.go delete mode 100644 vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go delete mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go delete mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go delete mode 100644 vendor/golang.org/x/text/runes/cond.go delete mode 100644 vendor/golang.org/x/text/runes/runes.go delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto diff --git a/go.mod b/go.mod index c3cbc34d..a6259437 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,6 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.18.15 github.com/aws/aws-sdk-go-v2/service/sts v1.18.5 github.com/aws/smithy-go v1.13.5 - github.com/hashicorp/terraform v0.13.0-beta1 github.com/hashicorp/terraform-plugin-sdk/v2 v2.25.0 github.com/pkg/errors v0.9.1 github.com/stretchr/testify v1.8.0 @@ -16,16 +15,14 @@ require ( k8s.io/apimachinery v0.25.4 k8s.io/client-go v0.25.4 k8s.io/kube-aggregator v0.25.4 + k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed ) require ( github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/agext/levenshtein v1.2.2 // indirect - github.com/apparentlymart/go-cidr v1.1.0 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect - github.com/apparentlymart/go-versions v1.0.0 // indirect - github.com/aws/aws-sdk-go v1.31.9 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.13.15 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.23 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.29 // indirect @@ -34,8 +31,6 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.23 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.12.4 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.4 // indirect - github.com/blang/semver v3.5.1+incompatible // indirect - github.com/bmatcuk/doublestar v1.1.5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.8.0 // indirect github.com/fatih/color v1.13.0 // indirect @@ -48,19 +43,14 @@ require ( github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.1.0 // indirect - github.com/google/uuid v1.1.2 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect github.com/hashicorp/go-hclog v1.4.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.4.8 // indirect - github.com/hashicorp/go-retryablehttp v0.5.2 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/hcl/v2 v2.16.1 // indirect - github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-plugin-go v0.14.3 // indirect github.com/hashicorp/terraform-plugin-log v0.8.0 // indirect @@ -74,10 +64,8 @@ 
 	github.com/mattn/go-colorable v0.1.12 // indirect
 	github.com/mattn/go-isatty v0.0.14 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
-	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/go-testing-interface v1.14.1 // indirect
 	github.com/mitchellh/go-wordwrap v1.0.0 // indirect
-	github.com/mitchellh/hashstructure v1.0.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -85,31 +73,26 @@ require (
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/oklog/run v1.0.0 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/spf13/afero v1.9.5 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect
 	github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect
 	github.com/vmihailenco/tagparser v0.1.1 // indirect
 	github.com/zclconf/go-cty v1.12.1 // indirect
-	github.com/zclconf/go-cty-yaml v1.0.2 // indirect
-	golang.org/x/crypto v0.6.0 // indirect
-	golang.org/x/mod v0.9.0 // indirect
 	golang.org/x/net v0.6.0 // indirect
-	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
+	golang.org/x/oauth2 v0.4.0 // indirect
 	golang.org/x/sys v0.5.0 // indirect
 	golang.org/x/term v0.5.0 // indirect
 	golang.org/x/text v0.7.0 // indirect
 	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
-	google.golang.org/grpc v1.51.0 // indirect
+	google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
+	google.golang.org/grpc v1.53.0 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/klog/v2 v2.70.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
-	k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect
 	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
 	sigs.k8s.io/yaml v1.2.0 // indirect

diff --git a/go.sum b/go.sum
index 9a00ae72..f3777d92 100644
--- a/go.sum
+++ b/go.sum
@@ -1,112 +1,16 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
-cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.14.0 h1:6RRlFMv1omScs6iq2hfE3IvgE+l6RfJPampq8UZc5TU=
-cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go v21.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v32.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v40.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/go-autorest v10.15.4+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
-github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
-github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
-github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
-github.com/Azure/go-autorest/autorest/azure/cli v0.2.0/go.mod h1:WWTbGPvkAg3I4ms2j2s+Zr5xCGwGqTQh+6M2ZqOczkE=
-github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
-github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
-github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
-github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4=
 github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM=
-github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
-github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
 github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE=
 github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
-github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA=
-github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
-github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible/go.mod h1:LDQHRZylxvcg8H7wBIDfvO5g/cy4/sz1iucBlc2l3Jw=
-github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
-github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
-github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU=
-github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
-github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
-github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I=
-github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
 github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec=
 github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
 github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
-github.com/apparentlymart/go-userdirs v0.0.0-20190512014041-4a23807e62b9/go.mod h1:7kfpUbyCdGJ9fDRCp3fopPQi5+cKNHgTE4ZuNrO71Cw=
-github.com/apparentlymart/go-versions v0.0.2-0.20180815153302-64b99f7cb171/go.mod h1:JXY95WvQrPJQtudvNARshgWajS7jNNlM90altXIPNyI=
-github.com/apparentlymart/go-versions v1.0.0 h1:4A4CekGuwDUQqc+uTXCrdb9Y98JZsML2sdfNTeVjsK4=
-github.com/apparentlymart/go-versions v1.0.0/go.mod h1:YF5j7IQtrOAOnsGkniupEA5bfCjzd7i14yu0shZavyM=
-github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
-github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
-github.com/aws/aws-sdk-go v1.31.9 h1:n+b34ydVfgC30j0Qm69yaapmjejQPW2BoDBX7Uy/tLI=
-github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
 github.com/aws/aws-sdk-go-v2 v1.17.5 h1:TzCUW1Nq4H8Xscph5M/skINUitxM5UBAyvm2s7XBzL4=
 github.com/aws/aws-sdk-go-v2 v1.17.5/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
 github.com/aws/aws-sdk-go-v2/config v1.18.15 h1:509yMO0pJUGUugBP2H9FOFyV+7Mz7sRR+snfDN5W4NY=
@@ -131,63 +35,19 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.18.5 h1:L1600eLr0YvTT7gNh3Ni24yGI7NS
 github.com/aws/aws-sdk-go-v2/service/sts v1.18.5/go.mod h1:1mKZHLLpDMHTNSYPJ7qrcnCQdHCWsNQaT0xRvq2u80s=
 github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
 github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
-github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
-github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/bmatcuk/doublestar v1.1.5 h1:2bNwBOmhyFEFcoB3tGvTD5xanq+4kyOZlB8wFYbMjkk=
-github.com/bmatcuk/doublestar v1.1.5/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
-github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dimchansky/utfbom v1.0.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
-github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dylanmei/iso8601 v0.1.0/go.mod h1:w9KhXSgIyROl1DefbMYIE7UVSIvELTbMrCfx+QkYnoQ= -github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1/go.mod h1:lcy9/2gH1jn/VCLouHA6tOEwLoNVd4GW6zhuKLmHC2Y= github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -200,162 +60,58 @@ github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= -github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod 
h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic 
v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 
h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gophercloud/gophercloud v0.6.1-0.20191122030953-d8ac278c1c9d/go.mod h1:ozGNgr9KYOVATV5jsgHl/ceCDXGuguqOZAzoQ/2vcNM= -github.com/gophercloud/gophercloud v0.10.1-0.20200424014253-c3bfe50899e5/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= -github.com/gophercloud/utils v0.0.0-20200423144003-7c72efc7435d/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/aws-sdk-go-base v0.4.0/go.mod h1:eRhlz3c4nhqxFZJAahJEFL7gh6Jyj5rQmQc7F9eHFyQ= -github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-azure-helpers v0.4.1/go.mod h1:lu62V//auUow6k0IykxLK2DCNW8qTmpm8KqhYVWattA= -github.com/hashicorp/go-azure-helpers v0.10.0/go.mod h1:YuAtHxm2v74s+IjQwUG88dHBJPd5jL+cXr5BGVzSKhE= -github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= -github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02 h1:l1KB3bHVdvegcIf5upQ5mjcHjs2qsWnKh4Yr9xgIuu8= -github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa/go.mod h1:6ij3Z20p+OhOkCSrA0gImAWoHYQRGbnlcuk6XYTiaRw= -github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod 
h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= github.com/hashicorp/go-plugin v1.4.8 h1:CHGwpxYDOttQOY7HOWgETU9dyVjOXzniXDqJcYJE1zM= github.com/hashicorp/go-plugin v1.4.8/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= -github.com/hashicorp/go-retryablehttp v0.5.2 h1:AoISa4P4IsW0/m4T6St8Yw38gTl5GtBAgfkhYh1xAz4= -github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= -github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= -github.com/hashicorp/go-slug v0.4.1/go.mod h1:I5tq5Lv0E2xcNXNkmx7BSfzi1PsJ2cNjs3cC3LwyhK8= -github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-tfe v0.8.1/go.mod h1:XAV72S4O1iP8BDaqiaPLmL2B4EE6almocnOn8E8stHc= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= -github.com/hashicorp/hcl/v2 v2.5.1/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= github.com/hashicorp/hcl/v2 v2.16.1 h1:BwuxEMD/tsYgbhIW7UuI3crjovf3MzuFWiVgiv57iHg= github.com/hashicorp/hcl/v2 v2.16.1/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= -github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590 h1:2yzhWGdgQUWZUCNK+AoO35V+HTsgEmcM4J9IkArh7PI= -github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/memberlist v0.1.0/go.mod h1:ncdBp14cuox2iFOq3kDiquKU6fqsTBc3W6JvZwjxxsE= -github.com/hashicorp/serf v0.0.0-20160124182025-e4ec8cc423bb/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= -github.com/hashicorp/terraform v0.13.0-beta1 
h1:Z/zlH2ANtXPAlQppvgomSdF6Mf5NmI2hG67tCmU+e2E= -github.com/hashicorp/terraform v0.13.0-beta1/go.mod h1:r32N7N9CsH6sAdWaTdhz6V/O/mk/JFwUrwpOtP+XcXE= -github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7 h1:Pc5TCv9mbxFN6UVX0LH6CpQrdTM5YjbVI2w15237Pjk= -github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= github.com/hashicorp/terraform-plugin-go v0.14.3 h1:nlnJ1GXKdMwsC8g1Nh05tK2wsC3+3BL/DBBxFEki+j0= github.com/hashicorp/terraform-plugin-go v0.14.3/go.mod h1:7ees7DMZ263q8wQ6E4RdIdR6nHHJtrdt4ogX5lPkX1A= github.com/hashicorp/terraform-plugin-log v0.8.0 h1:pX2VQ/TGKu+UU1rCay0OlzosNKe4Nz1pepLXj95oyy0= @@ -364,43 +120,21 @@ github.com/hashicorp/terraform-plugin-sdk/v2 v2.25.0 h1:iNRjaJCatQS1rIbHs/vDvJ0G github.com/hashicorp/terraform-plugin-sdk/v2 v2.25.0/go.mod h1:XnVNLIS6bdMJbjSDujhX4Rlk24QpbGKbnrVFM4tZ7OU= github.com/hashicorp/terraform-registry-address v0.1.0 h1:W6JkV9wbum+m516rCl5/NjKxCyTVaaUBbzYcMzBDO3U= github.com/hashicorp/terraform-registry-address v0.1.0/go.mod h1:EnyO2jYO6j29DTHbJcm00E5nQTFeTtyZH3H5ycydQ5A= -github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= -github.com/hashicorp/vault v0.10.4/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= -github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926/go.mod 
h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= -github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= -github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -410,339 +144,112 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/likexian/gokit v0.0.0-20190309162924-0a377eecf7aa/go.mod h1:QdfYv6y6qPA9pbBA2qXtoT8BMKha6UyNbxWGWl/9Jfk= -github.com/likexian/gokit v0.0.0-20190418170008-ace88ad0983b/go.mod h1:KKqSnk/VVSW8kEyO2vVCXoanzEutKdlBAPohmGXkxCk= -github.com/likexian/gokit v0.0.0-20190501133040-e77ea8b19cdc/go.mod h1:3kvONayqCaj+UgrRZGpgfXzHdMYCAO0KAt4/8n0L57Y= -github.com/likexian/gokit v0.20.15/go.mod h1:kn+nTv3tqh6yhor9BC4Lfiu58SmH8NmQ2PmEl+uM6nU= -github.com/likexian/simplejson-go v0.0.0-20190409170913-40473a74d76d/go.mod h1:Typ1BfnATYtZ/+/shXfFYLrovhFyuKvzwrdOnIDHlmg= -github.com/likexian/simplejson-go v0.0.0-20190419151922-c1f9f0b4f084/go.mod h1:U4O1vIJvIKwbMZKUJ62lppfdvkCdVd2nfMimHK81eec= -github.com/likexian/simplejson-go v0.0.0-20190502021454-d8787b4bfa0b/go.mod h1:3BWwtmKP9cXWwYCr5bkoVDEfLywacOv0s06OBEDpyt8= -github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82/go.mod h1:y54tfGmO3NKssKveTEFFzH8C/akrSOy/iW9qEAUDV84= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson 
v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc= -github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b/go.mod h1:wr1VqkwW0AB5JS0QLy5GpVMS9E3VtRoSYXUYyVk46KY= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-shellwords v1.0.4/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.0.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-linereader v0.0.0-20190213213312-1b945b3263eb/go.mod h1:OaY7UOoTkkrX3wRwjpYRKafIkkyeD0UtweSHAWWiqQM= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y= -github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= -github.com/mitchellh/mapstructure 
v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/panicwrap v1.0.0/go.mod h1:pKvZHwWrZowLUzftuFq7coarnxbBXU4aQh3N0BJOeeA= -github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51/go.mod h1:kB1naBgV9ORnkiTVeyJOI1DavaJkG4oNIq0Af6ZVKUo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= -github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= 
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify 
v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw= -github.com/tencentcloud/tencentcloud-sdk-go v3.0.82+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4= -github.com/tencentyun/cos-go-sdk-v5 v0.0.0-20190808065407-f07404cefc8c/go.mod h1:wk2XFUg6egk4tSDNZtXeKfe2G6690UVyt163PuUxBZk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tombuildsstuff/giovanni v0.10.1/go.mod h1:WwPhFP2+WnhJzvPYDnsyBab2wOIksMX6xm+Tg+jVvKw= -github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= -github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= -github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= -github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.4.2/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ= github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= -github.com/zclconf/go-cty-yaml v1.0.1/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= -github.com/zclconf/go-cty-yaml v1.0.2 h1:dNyg4QLTrv2IfJpm7Wtxi55ed5gLGOlPrZ6kMd51hY0= -github.com/zclconf/go-cty-yaml v1.0.2/go.mod 
h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp 
v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190509141414-a5b02f93d862/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -751,202 +258,61 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= 
-google.golang.org/api v0.40.0 h1:uWrpz12dpVPn7cojP82mk02XDgTJLDPc2KbVTxrWb4A= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc 
v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 
v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -955,12 +321,7 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.25.4 h1:3YO8J4RtmG7elEgaWMb4HgmpS2CfY1QlaOz9nwB+ZSs= k8s.io/api v0.25.4/go.mod h1:IG2+RzyPQLllQxnhzD8KQNEu4c4YvyDTpSMztf4A0OQ= k8s.io/apiextensions-apiserver v0.25.4 h1:7hu9pF+xikxQuQZ7/30z/qxIPZc2J1lFElPtr7f+B6U= @@ -978,9 +339,6 @@ k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkI k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= diff --git a/vendor/github.com/apparentlymart/go-cidr/LICENSE b/vendor/github.com/apparentlymart/go-cidr/LICENSE deleted file mode 100644 index 21253788..00000000 --- a/vendor/github.com/apparentlymart/go-cidr/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Martin Atkins - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the 
Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go deleted file mode 100644 index 20823af0..00000000 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go +++ /dev/null @@ -1,236 +0,0 @@ -// Package cidr is a collection of assorted utilities for computing -// network and host addresses within network ranges. -// -// It expects a CIDR-type address structure where addresses are divided into -// some number of prefix bits representing the network and then the remaining -// suffix bits represent the host. -// -// For example, it can help to calculate addresses for sub-networks of a -// parent network, or to calculate host addresses within a particular prefix. -// -// At present this package is prioritizing simplicity of implementation and -// de-prioritizing speed and memory usage. Thus caution is advised before -// using this package in performance-critical applications or hot codepaths. -// Patches to improve the speed and memory usage may be accepted as long as -// they do not result in a significant increase in code complexity. -package cidr - -import ( - "fmt" - "math/big" - "net" -) - -// Subnet takes a parent CIDR range and creates a subnet within it -// with the given number of additional prefix bits and the given -// network number. -// -// For example, 10.3.0.0/16, extended by 8 bits, with a network number -// of 5, becomes 10.3.5.0/24 . -func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) { - return SubnetBig(base, newBits, big.NewInt(int64(num))) -} - -// SubnetBig takes a parent CIDR range and creates a subnet within it with the -// given number of additional prefix bits and the given network number. It -// differs from Subnet in that it takes a *big.Int for the num, instead of an int. -// -// For example, 10.3.0.0/16, extended by 8 bits, with a network number of 5, -// becomes 10.3.5.0/24 . -func SubnetBig(base *net.IPNet, newBits int, num *big.Int) (*net.IPNet, error) { - ip := base.IP - mask := base.Mask - - parentLen, addrLen := mask.Size() - newPrefixLen := parentLen + newBits - - if newPrefixLen > addrLen { - return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits) - } - - maxNetNum := uint64(1<<uint64(newBits)) - 1 - if num.Uint64() > maxNetNum { - return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num) - } - - return &net.IPNet{ - IP: insertNumIntoIP(ip, num, newPrefixLen), - Mask: net.CIDRMask(newPrefixLen, addrLen), - }, nil -} - -// Host takes a parent CIDR range and turns it into a host IP address with the -// given host number. -// -// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2.
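Reviewer's note: for anyone tracking what this vendor prune removes, below is a minimal, hypothetical sketch (not part of the patch) of the two helpers documented above, Subnet and Host, imported via the upstream module path matching the vendored directory being deleted. The expected outputs follow directly from the worked examples in the deleted doc comments.

    package main

    import (
        "fmt"
        "net"

        "github.com/apparentlymart/go-cidr/cidr"
    )

    func main() {
        // 10.3.0.0/16 extended by 8 new prefix bits with network number 5
        // yields 10.3.5.0/24, per the Subnet doc comment above.
        _, base, err := net.ParseCIDR("10.3.0.0/16")
        if err != nil {
            panic(err)
        }
        sub, err := cidr.Subnet(base, 8, 5)
        if err != nil {
            panic(err)
        }
        fmt.Println(sub) // 10.3.5.0/24

        // Host number 2 within 10.3.0.0/16 yields 10.3.0.2,
        // per the Host doc comment above.
        host, err := cidr.Host(base, 2)
        if err != nil {
            panic(err)
        }
        fmt.Println(host) // 10.3.0.2
    }

Both helpers return errors rather than panicking when the requested subnet or host number does not fit in the available bits, so callers are expected to check err.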
-func Host(base *net.IPNet, num int) (net.IP, error) { - return HostBig(base, big.NewInt(int64(num))) -} - -// HostBig takes a parent CIDR range and turns it into a host IP address with -// the given host number. It differs from Host in that it takes a *big.Int for -// the num, instead of an int. -// -// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2. -func HostBig(base *net.IPNet, num *big.Int) (net.IP, error) { - ip := base.IP - mask := base.Mask - - parentLen, addrLen := mask.Size() - hostLen := addrLen - parentLen - - maxHostNum := big.NewInt(int64(1)) - maxHostNum.Lsh(maxHostNum, uint(hostLen)) - maxHostNum.Sub(maxHostNum, big.NewInt(1)) - - numUint64 := big.NewInt(int64(num.Uint64())) - if num.Cmp(big.NewInt(0)) == -1 { - numUint64.Neg(num) - numUint64.Sub(numUint64, big.NewInt(int64(1))) - num.Sub(maxHostNum, numUint64) - } - - if numUint64.Cmp(maxHostNum) == 1 { - return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num) - } - var bitlength int - if ip.To4() != nil { - bitlength = 32 - } else { - bitlength = 128 - } - return insertNumIntoIP(ip, num, bitlength), nil -} - -// AddressRange returns the first and last addresses in the given CIDR range. -func AddressRange(network *net.IPNet) (net.IP, net.IP) { - // the first IP is easy - firstIP := network.IP - - // the last IP is the network address OR NOT the mask address - prefixLen, bits := network.Mask.Size() - if prefixLen == bits { - // Easy! - // But make sure that our two slices are distinct, since they - // would be in all other cases. - lastIP := make([]byte, len(firstIP)) - copy(lastIP, firstIP) - return firstIP, lastIP - } - - firstIPInt, bits := ipToInt(firstIP) - hostLen := uint(bits) - uint(prefixLen) - lastIPInt := big.NewInt(1) - lastIPInt.Lsh(lastIPInt, hostLen) - lastIPInt.Sub(lastIPInt, big.NewInt(1)) - lastIPInt.Or(lastIPInt, firstIPInt) - - return firstIP, intToIP(lastIPInt, bits) -} - -// AddressCount returns the number of distinct host addresses within the given -// CIDR range. -// -// Since the result is a uint64, this function returns meaningful information -// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65. -func AddressCount(network *net.IPNet) uint64 { - prefixLen, bits := network.Mask.Size() - return 1 << (uint64(bits) - uint64(prefixLen)) -} - -//VerifyNoOverlap takes a list of subnets and a supernet (CIDRBlock) and verifies that -//none of the subnets overlap and all subnets are in the supernet; -//it returns an error if any of those conditions are not satisfied -func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error { - firstLastIP := make([][]net.IP, len(subnets)) - for i, s := range subnets { - first, last := AddressRange(s) - firstLastIP[i] = []net.IP{first, last} - } - for i, s := range subnets { - if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) { - return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String()) - } - for j := 0; j < len(subnets); j++ { - if i == j { - continue - } - - first := firstLastIP[j][0] - last := firstLastIP[j][1] - if s.Contains(first) || s.Contains(last) { - return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String()) - } - } - } - return nil -} - -// PreviousSubnet returns the subnet of the desired mask in the IP space -// just lower than the start of IPNet provided.
If the IP space rolls over -// then the second return value is true -func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { - startIP := checkIPv4(network.IP) - previousIP := make(net.IP, len(startIP)) - copy(previousIP, startIP) - cMask := net.CIDRMask(prefixLen, 8*len(previousIP)) - previousIP = Dec(previousIP) - previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask} - if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) { - return previous, true - } - return previous, false -} - -// NextSubnet returns the next available subnet of the desired mask size -// starting from the maximum IP of the offset subnet. -// If the IP exceeds the maximum IP then the second return value is true -func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { - _, currentLast := AddressRange(network) - mask := net.CIDRMask(prefixLen, 8*len(currentLast)) - currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask} - _, last := AddressRange(currentSubnet) - last = Inc(last) - next := &net.IPNet{IP: last.Mask(mask), Mask: mask} - if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) { - return next, true - } - return next, false -} - -//Inc increases the IP by one; this returns a new []byte for the IP -func Inc(IP net.IP) net.IP { - IP = checkIPv4(IP) - incIP := make([]byte, len(IP)) - copy(incIP, IP) - for j := len(incIP) - 1; j >= 0; j-- { - incIP[j]++ - if incIP[j] > 0 { - break - } - } - return incIP -} - -//Dec decreases the IP by one; this returns a new []byte for the IP -func Dec(IP net.IP) net.IP { - IP = checkIPv4(IP) - decIP := make([]byte, len(IP)) - copy(decIP, IP) - decIP = checkIPv4(decIP) - for j := len(decIP) - 1; j >= 0; j-- { - decIP[j]-- - if decIP[j] < 255 { - break - } - } - return decIP -} - -func checkIPv4(ip net.IP) net.IP { - // Go for some reason allocs IPv6len for IPv4 so we have to correct it - if v4 := ip.To4(); v4 != nil { - return v4 - } - return ip -} diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go deleted file mode 100644 index e5e6a2cf..00000000 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go +++ /dev/null @@ -1,37 +0,0 @@ -package cidr - -import ( - "fmt" - "math/big" - "net" -) - -func ipToInt(ip net.IP) (*big.Int, int) { - val := &big.Int{} - val.SetBytes([]byte(ip)) - if len(ip) == net.IPv4len { - return val, 32 - } else if len(ip) == net.IPv6len { - return val, 128 - } else { - panic(fmt.Errorf("Unsupported address length %d", len(ip))) - } -} - -func intToIP(ipInt *big.Int, bits int) net.IP { - ipBytes := ipInt.Bytes() - ret := make([]byte, bits/8) - // Pack our IP bytes into the end of the return array, - // since big.Int.Bytes() removes front zero padding.
- for i := 1; i <= len(ipBytes); i++ { - ret[len(ret)-i] = ipBytes[len(ipBytes)-i] - } - return net.IP(ret) -} - -func insertNumIntoIP(ip net.IP, bigNum *big.Int, prefixLen int) net.IP { - ipInt, totalBits := ipToInt(ip) - bigNum.Lsh(bigNum, uint(totalBits-prefixLen)) - ipInt.Or(ipInt, bigNum) - return intToIP(ipInt, totalBits) -} diff --git a/vendor/github.com/apparentlymart/go-versions/LICENSE b/vendor/github.com/apparentlymart/go-versions/LICENSE deleted file mode 100644 index 83fe416b..00000000 --- a/vendor/github.com/apparentlymart/go-versions/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2018 Martin Atkins - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/apparentlymart/go-versions/versions/constraints/canon_style.go b/vendor/github.com/apparentlymart/go-versions/versions/constraints/canon_style.go deleted file mode 100644 index aa5e87cb..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/constraints/canon_style.go +++ /dev/null @@ -1,352 +0,0 @@ -package constraints - -import ( - "fmt" - "strings" -) - -// Parse parses a constraint string using a syntax similar to that used by -// npm, Go "dep", Rust's "cargo", etc. Exact compatibility with any of these -// systems is not guaranteed, but instead we aim for familiarity in the choice -// of operators and their meanings. The syntax described here is considered the -// canonical syntax for this package, but a Ruby-style syntax is also offered -// via the function "ParseRubyStyle". -// -// A constraint string is a sequence of selection sets delimited by ||, with -// each selection set being a whitespace-delimited sequence of selections. -// Each selection is then the combination of a matching operator and a boundary -// version. The following is an example of a complex constraint string -// illustrating all of these features: -// -// >=1.0.0 <2.0.0 || 1.0.0-beta1 || =2.0.2 -// -// In practice constraint strings are usually simpler than this, but this -// complex example allows us to identify each of the parts by example: -// -// Selection Sets: ">=1.0.0 <2.0.0" -// "1.0.0-beta1" -// "=2.0.2" -// Selections: ">=1.0.0" -// "<2.0.0" -// "1.0.0-beta1" -// "=2.0.2" -// Matching Operators: ">=", "<", "=" are explicit operators -// "1.0.0-beta1" has an implicit "=" operator -// Boundary Versions: "1.0.0", "2.0.0", "1.0.0-beta1", "2.0.2" -// -// A constraint string describes the members of a version set by adding exact -// versions or ranges of versions to that set. 
A version is in the set if -// any one of the selection sets match that version. A selection set matches -// a version if all of its selections match that version. A selection matches -// a version if the version has the indicated relationship with the given -// boundary version. -// -// In the above example, the first selection set matches all released versions -// whose major segment is 1, since both selections must apply. However, the -// remaining two selection sets describe two specific versions outside of that -// range that are also admitted, in addition to those in the indicated range. -// -// The available matching operators are: -// -// < Less than -// <= Less than or equal -// > Greater than -// >= Greater than or equal -// = Equal -// ! Not equal -// ~ Greater than with implied upper limit (described below) -// ^ Greater than excluding new major releases (described below) -// -// If no operator is specified, the operator is implied to be "equal" for a -// full version specification, or a special additional "match" operator for -// a version containing wildcards as described below. -// -// The "~" matching operator is a shorthand for expressing both a lower and -// upper limit within a single expression. The effect of this operator depends -// on how many segments are specified in the boundary version: if only one -// segment is specified then new minor and patch versions are accepted, whereas -// if two or three segments are specified then only patch versions are accepted. -// For example: -// -// ~1 is equivalent to >=1.0.0 <2.0.0 -// ~1.0 is equivalent to >=1.0.0 <1.1.0 -// ~1.2 is equivalent to >=1.2.0 <1.3.0 -// ~1.2.0 is equivalent to >=1.2.0 <1.3.0 -// ~1.2.3 is equivalent to >=1.2.3 <1.3.0 -// -// The "^" matching operator is similar to "~" except that it always constrains -// only the major version number. It has an additional special behavior for -// when the major version number is zero: in that case, the minor release -// number is constrained, reflecting the common semver convention that initial -// development releases mark breaking changes by incrementing the minor version. -// For example: -// -// ^1 is equivalent to >=1.0.0 <2.0.0 -// ^1.2 is equivalent to >=1.2.0 <2.0.0 -// ^1.2.3 is equivalent to >=1.2.3 <2.0.0 -// ^0.1.0 is equivalent to >=0.1.0 <0.2.0 -// ^0.1.2 is equivalent to >=0.1.2 <0.2.0 -// -// The boundary version can contain wildcards for the major, minor or patch -// segments, which are specified using the markers "*", "x", or "X". When used -// in a selection with no explicit operator, these specify the implied "match" -// operator and define ranges with similar meaning to the "~" and "^" operators: -// -// 1.* is equivalent to >=1.0.0 <2.0.0 -// 1.*.* is equivalent to >=1.0.0 <2.0.0 -// 1.0.* is equivalent to >=1.0.0 <1.1.0 -// -// When wildcards are used, the first segment specified as a wildcard implies -// that all of the following segments are also wildcards. A version -// specification like "1.*.2" is invalid, because a wildcard minor version -// implies that the patch version must also be a wildcard. -// -// Wildcards have no special meaning when used with explicit operators, and so -// they are merely replaced with zeros in such cases. 
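Reviewer's note: a small, hypothetical program (not part of the patch) showing the canon-style syntax documented above in use, against the upstream module being de-vendored here. Parse returns a UnionSpec, a slice holding one IntersectionSpec per "||"-delimited selection set, as the doc comment goes on to describe.

    package main

    import (
        "fmt"

        "github.com/apparentlymart/go-versions/versions/constraints"
    )

    func main() {
        // The complex example from the doc comment above: one range
        // selection set plus two exact-version alternatives.
        spec, err := constraints.Parse(">=1.0.0 <2.0.0 || 1.0.0-beta1 || =2.0.2")
        if err != nil {
            panic(err)
        }
        // One IntersectionSpec per "||"-delimited selection set.
        fmt.Println(len(spec)) // 3
    }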
-// -// Explicit range syntax using a hyphen creates inclusive upper and lower -// bounds: -// -// 1.0.0 - 2.0.0 is equivalent to >=1.0.0 <=2.0.0 -// 1.2.3 - 2.3.4 is equivalent to >=1.2.3 <=2.3.4 -// -// Requests of exact pre-release versions with the equals operator have -// no special meaning to the constraint parser, but are interpreted as explicit -// requests for those versions when interpreted by the MeetingConstraints -// function (and related functions) in the "versions" package, in the parent -// directory. Pre-release versions that are not explicitly requested are -// excluded from selection so that e.g. "^1.0.0" will not match a version -// "2.0.0-beta.1". -// -// The result is always a UnionSpec, whose members are IntersectionSpecs -// each describing one selection set. In the common case where a string -// contains only one selection, both the UnionSpec and the IntersectionSpec -// will have only one element and can thus be effectively ignored by the -// caller. (Union and intersection of single sets are both no-op.) -// A valid string must contain at least one selection; if an empty selection -// is to be considered as either "no versions" or "all versions" then this -// special case must be handled by the caller prior to calling this function. -// -// If there are syntax errors or ambiguities in the provided string then an -// error is returned. All errors returned by this function are suitable for -// display to English-speaking end-users, and avoid any Go-specific -// terminology. -func Parse(str string) (UnionSpec, error) { - str = strings.TrimSpace(str) - - if str == "" { - return nil, fmt.Errorf("empty specification") - } - - // Most constraint strings contain only one selection, so we'll - // allocate under that assumption and re-allocate if needed. - uspec := make(UnionSpec, 0, 1) - ispec := make(IntersectionSpec, 0, 1) - - remain := str - for { - var selection SelectionSpec - var err error - selection, remain, err = parseSelection(remain) - if err != nil { - return nil, err - } - - remain = strings.TrimSpace(remain) - - if len(remain) > 0 && remain[0] == '-' { - // Looks like user wants to make a range expression, so we'll - // look for another selection. - remain = strings.TrimSpace(remain[1:]) - if remain == "" { - return nil, fmt.Errorf(`operator "-" must be followed by another version selection to specify the upper limit of the range`) - } - - var lower, upper SelectionSpec - lower = selection - upper, remain, err = parseSelection(remain) - remain = strings.TrimSpace(remain) - if err != nil { - return nil, err - } - - if lower.Operator != OpUnconstrained { - return nil, fmt.Errorf(`lower bound of range specified with "-" operator must be an exact version`) - } - if upper.Operator != OpUnconstrained { - return nil, fmt.Errorf(`upper bound of range specified with "-" operator must be an exact version`) - } - - lower.Operator = OpGreaterThanOrEqual - lower.Boundary = lower.Boundary.ConstrainToZero() - if upper.Boundary.IsExact() { - upper.Operator = OpLessThanOrEqual - } else { - upper.Operator = OpLessThan - upper.Boundary = upper.Boundary.ConstrainToUpperBound() - } - ispec = append(ispec, lower, upper) - } else { - if selection.Operator == OpUnconstrained { - // Select a default operator based on whether the version - // specification contains wildcards. 
- if selection.Boundary.IsExact() { - selection.Operator = OpEqual - } else { - selection.Operator = OpMatch - } - } - if selection.Operator != OpMatch { - switch selection.Operator { - case OpMatch: - // nothing to do - case OpLessThanOrEqual: - if !selection.Boundary.IsExact() { - selection.Operator = OpLessThan - selection.Boundary = selection.Boundary.ConstrainToUpperBound() - } - case OpGreaterThan: - if !selection.Boundary.IsExact() { - // If "greater than" has an imprecise boundary then we'll - // turn it into a "greater than or equal to" and use the - // upper bound of the boundary, so e.g.: - // >1.*.* means >=2.0.0, because that's greater than - // everything matched by 1.*.*. - selection.Operator = OpGreaterThanOrEqual - selection.Boundary = selection.Boundary.ConstrainToUpperBound() - } - default: - selection.Boundary = selection.Boundary.ConstrainToZero() - } - } - ispec = append(ispec, selection) - } - - if len(remain) == 0 { - // All done! - break - } - - if remain[0] == ',' { - return nil, fmt.Errorf(`commas are not needed to separate version selections; separate with spaces instead`) - } - - if remain[0] == '|' { - if !strings.HasPrefix(remain, "||") { - // User was probably trying for "||", so we'll produce a specialized error - return nil, fmt.Errorf(`single "|" is not a valid operator; did you mean "||" to specify an alternative?`) - } - remain = strings.TrimSpace(remain[2:]) - if remain == "" { - return nil, fmt.Errorf(`operator "||" must be followed by another version selection`) - } - - // Begin a new IntersectionSpec, added to our single UnionSpec - uspec = append(uspec, ispec) - ispec = make(IntersectionSpec, 0, 1) - } - } - - uspec = append(uspec, ispec) - - return uspec, nil -} - -// parseSelection parses one canon-style selection from the prefix of the -// given string, returning the result along with the remaining unconsumed -// string for the caller to use for further processing. -func parseSelection(str string) (SelectionSpec, string, error) { - raw, remain := scanConstraint(str) - var spec SelectionSpec - - if len(str) == len(remain) { - if len(remain) > 0 && remain[0] == 'v' { - // User seems to be trying to use a "v" prefix, like "v1.0.0" - return spec, remain, fmt.Errorf(`a "v" prefix should not be used when specifying versions`) - } - - // If we made no progress at all then the selection must be entirely invalid. - return spec, remain, fmt.Errorf("the sequence %q is not valid", remain) - } - - switch raw.op { - case "": - // We'll deal with this situation in the caller - spec.Operator = OpUnconstrained - case "=": - spec.Operator = OpEqual - case "!": - spec.Operator = OpNotEqual - case ">": - spec.Operator = OpGreaterThan - case ">=": - spec.Operator = OpGreaterThanOrEqual - case "<": - spec.Operator = OpLessThan - case "<=": - spec.Operator = OpLessThanOrEqual - case "~": - if raw.numCt > 1 { - spec.Operator = OpGreaterThanOrEqualPatchOnly - } else { - spec.Operator = OpGreaterThanOrEqualMinorOnly - } - case "^": - if len(raw.nums[0]) > 0 && raw.nums[0][0] == '0' { - // Special case for major version 0, which is initial development: - // we treat the minor number as if it's the major number. 
- spec.Operator = OpGreaterThanOrEqualPatchOnly - } else { - spec.Operator = OpGreaterThanOrEqualMinorOnly - } - case "=<": - return spec, remain, fmt.Errorf("invalid constraint operator %q; did you mean \"<=\"?", raw.op) - case "=>": - return spec, remain, fmt.Errorf("invalid constraint operator %q; did you mean \">=\"?", raw.op) - default: - return spec, remain, fmt.Errorf("invalid constraint operator %q", raw.op) - } - - if raw.sep != "" { - return spec, remain, fmt.Errorf("no spaces allowed after operator %q", raw.op) - } - - if raw.numCt > 3 { - return spec, remain, fmt.Errorf("too many numbered portions; only three are allowed (major, minor, patch)") - } - - // Unspecified portions are either zero or wildcard depending on whether - // any explicit wildcards are present. - seenWild := false - for i, s := range raw.nums { - switch { - case isWildcardNum(s): - seenWild = true - case i >= raw.numCt: - if seenWild { - raw.nums[i] = "*" - } else { - raw.nums[i] = "0" - } - default: - // If we find a non-wildcard after we've already seen a wildcard - // then this specification is inconsistent, which is an error. - if seenWild { - return spec, remain, fmt.Errorf("can't use exact %s segment after a previous segment was wildcard", rawNumNames[i]) - } - } - } - - if seenWild { - if raw.pre != "" { - return spec, remain, fmt.Errorf(`can't use prerelease segment (introduced by "-") in a version with wildcards`) - } - if raw.meta != "" { - return spec, remain, fmt.Errorf(`can't use build metadata segment (introduced by "+") in a version with wildcards`) - } - } - - spec.Boundary = raw.VersionSpec() - - return spec, remain, nil -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/constraints/constraintdepth_string.go b/vendor/github.com/apparentlymart/go-versions/versions/constraints/constraintdepth_string.go deleted file mode 100644 index 0f808f91..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/constraints/constraintdepth_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type ConstraintDepth"; DO NOT EDIT. - -package constraints - -import "strconv" - -const _ConstraintDepth_name = "UnconstrainedConstrainedMajorConstrainedMinorConstrainedPatch" - -var _ConstraintDepth_index = [...]uint8{0, 13, 29, 45, 61} - -func (i ConstraintDepth) String() string { - if i < 0 || i >= ConstraintDepth(len(_ConstraintDepth_index)-1) { - return "ConstraintDepth(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _ConstraintDepth_name[_ConstraintDepth_index[i]:_ConstraintDepth_index[i+1]] -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/constraints/doc.go b/vendor/github.com/apparentlymart/go-versions/versions/constraints/doc.go deleted file mode 100644 index 17b8b90f..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/constraints/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Package constraints contains a high-level representation of version -// constraints that retains enough information for direct analysis and -// serialization as a string. -// -// The package also contains parsers to produce that representation from -// various compact constraint specification formats. -// -// The main "versions" package, available in the parent directory, can consume -// the high-level constraint representation from this package to construct -// a version set that contains all versions meeting the given constraints. 
-// Package "constraints" does not contain any functionalty for checking versions -// against constraints since that is provided by package "versions". -package constraints diff --git a/vendor/github.com/apparentlymart/go-versions/versions/constraints/raw.go b/vendor/github.com/apparentlymart/go-versions/versions/constraints/raw.go deleted file mode 100644 index fdea80a3..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/constraints/raw.go +++ /dev/null @@ -1,74 +0,0 @@ -package constraints - -import ( - "strconv" -) - -//go:generate ragel -G1 -Z raw_scan.rl -//go:generate gofmt -w raw_scan.go - -// rawConstraint is a tokenization of a constraint string, used internally -// as the first layer of parsing. -type rawConstraint struct { - op string - sep string - nums [3]string - numCt int - pre string - meta string -} - -// VersionSpec turns the receiver into a VersionSpec in a reasonable -// default way. This method assumes that the raw constraint was already -// validated, and will panic or produce undefined results if it contains -// anything invalid. -// -// In particular, numbers are automatically marked as unconstrained if they -// are omitted or set to wildcards, so the caller must apply any additional -// validation rules on the usage of unconstrained numbers before calling. -func (raw rawConstraint) VersionSpec() VersionSpec { - return VersionSpec{ - Major: parseRawNumConstraint(raw.nums[0]), - Minor: parseRawNumConstraint(raw.nums[1]), - Patch: parseRawNumConstraint(raw.nums[2]), - Prerelease: raw.pre, - Metadata: raw.meta, - } -} - -var rawNumNames = [...]string{"major", "minor", "patch"} - -func isWildcardNum(s string) bool { - switch s { - case "*", "x", "X": - return true - default: - return false - } -} - -// parseRawNum parses a raw number string which the caller has already -// determined is non-empty and non-wildcard. If the string is not numeric -// then this function will panic. -func parseRawNum(s string) uint64 { - v, err := strconv.ParseUint(s, 10, 64) - if err != nil { - panic(err) - } - return v -} - -// parseRawNumConstraint parses a raw number into a NumConstraint, setting it -// to unconstrained if the value is empty or a wildcard. -func parseRawNumConstraint(s string) NumConstraint { - switch { - case s == "" || isWildcardNum(s): - return NumConstraint{ - Unconstrained: true, - } - default: - return NumConstraint{ - Num: parseRawNum(s), - } - } -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/constraints/raw_scan.go b/vendor/github.com/apparentlymart/go-versions/versions/constraints/raw_scan.go deleted file mode 100644 index 2f7a81c4..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/constraints/raw_scan.go +++ /dev/null @@ -1,623 +0,0 @@ -// line 1 "raw_scan.rl" -// This file is generated from raw_scan.rl. DO NOT EDIT. 
- -// line 5 "raw_scan.rl" - -package constraints - -// line 12 "raw_scan.go" -var _scan_eof_actions []byte = []byte{ - 0, 1, 1, 7, 9, 9, 9, 11, - 14, 15, 11, -} - -const scan_start int = 1 -const scan_first_final int = 7 -const scan_error int = 0 - -const scan_en_main int = 1 - -// line 11 "raw_scan.rl" - -func scanConstraint(data string) (rawConstraint, string) { - var constraint rawConstraint - var numIdx int - var extra string - - // Ragel state - p := 0 // "Pointer" into data - pe := len(data) // End-of-data "pointer" - cs := 0 // constraint state (will be initialized by ragel-generated code) - ts := 0 - te := 0 - eof := pe - - // Keep Go compiler happy even if generated code doesn't use these - _ = ts - _ = te - _ = eof - - // line 47 "raw_scan.go" - { - cs = scan_start - } - - // line 52 "raw_scan.go" - { - if p == pe { - goto _test_eof - } - if cs == 0 { - goto _out - } - _resume: - switch cs { - case 1: - switch data[p] { - case 32: - goto tr1 - case 42: - goto tr2 - case 46: - goto tr3 - case 88: - goto tr2 - case 120: - goto tr2 - } - switch { - case data[p] < 48: - if 9 <= data[p] && data[p] <= 13 { - goto tr1 - } - case data[p] > 57: - switch { - case data[p] > 90: - if 97 <= data[p] && data[p] <= 122 { - goto tr3 - } - case data[p] >= 65: - goto tr3 - } - default: - goto tr4 - } - goto tr0 - case 2: - switch data[p] { - case 32: - goto tr6 - case 42: - goto tr7 - case 46: - goto tr3 - case 88: - goto tr7 - case 120: - goto tr7 - } - switch { - case data[p] < 48: - if 9 <= data[p] && data[p] <= 13 { - goto tr6 - } - case data[p] > 57: - switch { - case data[p] > 90: - if 97 <= data[p] && data[p] <= 122 { - goto tr3 - } - case data[p] >= 65: - goto tr3 - } - default: - goto tr8 - } - goto tr5 - case 3: - switch data[p] { - case 32: - goto tr10 - case 42: - goto tr11 - case 88: - goto tr11 - case 120: - goto tr11 - } - switch { - case data[p] > 13: - if 48 <= data[p] && data[p] <= 57 { - goto tr12 - } - case data[p] >= 9: - goto tr10 - } - goto tr9 - case 0: - goto _out - case 7: - switch data[p] { - case 43: - goto tr19 - case 45: - goto tr20 - case 46: - goto tr21 - } - goto tr18 - case 4: - switch { - case data[p] < 48: - if 45 <= data[p] && data[p] <= 46 { - goto tr14 - } - case data[p] > 57: - switch { - case data[p] > 90: - if 97 <= data[p] && data[p] <= 122 { - goto tr14 - } - case data[p] >= 65: - goto tr14 - } - default: - goto tr14 - } - goto tr13 - case 8: - switch { - case data[p] < 48: - if 45 <= data[p] && data[p] <= 46 { - goto tr14 - } - case data[p] > 57: - switch { - case data[p] > 90: - if 97 <= data[p] && data[p] <= 122 { - goto tr14 - } - case data[p] >= 65: - goto tr14 - } - default: - goto tr14 - } - goto tr22 - case 5: - switch { - case data[p] < 48: - if 45 <= data[p] && data[p] <= 46 { - goto tr15 - } - case data[p] > 57: - switch { - case data[p] > 90: - if 97 <= data[p] && data[p] <= 122 { - goto tr15 - } - case data[p] >= 65: - goto tr15 - } - default: - goto tr15 - } - goto tr13 - case 9: - if data[p] == 43 { - goto tr24 - } - switch { - case data[p] < 48: - if 45 <= data[p] && data[p] <= 46 { - goto tr15 - } - case data[p] > 57: - switch { - case data[p] > 90: - if 97 <= data[p] && data[p] <= 122 { - goto tr15 - } - case data[p] >= 65: - goto tr15 - } - default: - goto tr15 - } - goto tr23 - case 6: - switch data[p] { - case 42: - goto tr16 - case 88: - goto tr16 - case 120: - goto tr16 - } - if 48 <= data[p] && data[p] <= 57 { - goto tr17 - } - goto tr13 - case 10: - switch data[p] { - case 43: - goto tr19 - case 45: - goto tr20 - case 46: - goto 
tr21 - } - if 48 <= data[p] && data[p] <= 57 { - goto tr25 - } - goto tr18 - } - - tr3: - cs = 0 - goto f0 - tr9: - cs = 0 - goto f6 - tr13: - cs = 0 - goto f8 - tr18: - cs = 0 - goto f10 - tr22: - cs = 0 - goto f13 - tr23: - cs = 0 - goto f14 - tr5: - cs = 2 - goto _again - tr0: - cs = 2 - goto f1 - tr10: - cs = 3 - goto _again - tr1: - cs = 3 - goto f2 - tr6: - cs = 3 - goto f4 - tr19: - cs = 4 - goto f11 - tr24: - cs = 4 - goto f15 - tr20: - cs = 5 - goto f11 - tr21: - cs = 6 - goto f12 - tr2: - cs = 7 - goto f3 - tr7: - cs = 7 - goto f5 - tr11: - cs = 7 - goto f7 - tr16: - cs = 7 - goto f9 - tr14: - cs = 8 - goto _again - tr15: - cs = 9 - goto _again - tr25: - cs = 10 - goto _again - tr4: - cs = 10 - goto f3 - tr8: - cs = 10 - goto f5 - tr12: - cs = 10 - goto f7 - tr17: - cs = 10 - goto f9 - - f9: - // line 38 "raw_scan.rl" - - ts = p - - goto _again - f12: - // line 52 "raw_scan.rl" - - te = p - constraint.numCt++ - if numIdx < len(constraint.nums) { - constraint.nums[numIdx] = data[ts:p] - numIdx++ - } - - goto _again - f8: - // line 71 "raw_scan.rl" - - extra = data[p:] - - goto _again - f1: - // line 33 "raw_scan.rl" - - numIdx = 0 - constraint = rawConstraint{} - - // line 38 "raw_scan.rl" - - ts = p - - goto _again - f4: - // line 42 "raw_scan.rl" - - te = p - constraint.op = data[ts:p] - - // line 38 "raw_scan.rl" - - ts = p - - goto _again - f7: - // line 47 "raw_scan.rl" - - te = p - constraint.sep = data[ts:p] - - // line 38 "raw_scan.rl" - - ts = p - - goto _again - f6: - // line 47 "raw_scan.rl" - - te = p - constraint.sep = data[ts:p] - - // line 71 "raw_scan.rl" - - extra = data[p:] - - goto _again - f11: - // line 52 "raw_scan.rl" - - te = p - constraint.numCt++ - if numIdx < len(constraint.nums) { - constraint.nums[numIdx] = data[ts:p] - numIdx++ - } - - // line 38 "raw_scan.rl" - - ts = p - - goto _again - f10: - // line 52 "raw_scan.rl" - - te = p - constraint.numCt++ - if numIdx < len(constraint.nums) { - constraint.nums[numIdx] = data[ts:p] - numIdx++ - } - - // line 71 "raw_scan.rl" - - extra = data[p:] - - goto _again - f15: - // line 61 "raw_scan.rl" - - te = p - constraint.pre = data[ts+1 : p] - - // line 38 "raw_scan.rl" - - ts = p - - goto _again - f14: - // line 61 "raw_scan.rl" - - te = p - constraint.pre = data[ts+1 : p] - - // line 71 "raw_scan.rl" - - extra = data[p:] - - goto _again - f13: - // line 66 "raw_scan.rl" - - te = p - constraint.meta = data[ts+1 : p] - - // line 71 "raw_scan.rl" - - extra = data[p:] - - goto _again - f2: - // line 33 "raw_scan.rl" - - numIdx = 0 - constraint = rawConstraint{} - - // line 38 "raw_scan.rl" - - ts = p - - // line 42 "raw_scan.rl" - - te = p - constraint.op = data[ts:p] - - goto _again - f5: - // line 42 "raw_scan.rl" - - te = p - constraint.op = data[ts:p] - - // line 38 "raw_scan.rl" - - ts = p - - // line 47 "raw_scan.rl" - - te = p - constraint.sep = data[ts:p] - - goto _again - f0: - // line 42 "raw_scan.rl" - - te = p - constraint.op = data[ts:p] - - // line 47 "raw_scan.rl" - - te = p - constraint.sep = data[ts:p] - - // line 71 "raw_scan.rl" - - extra = data[p:] - - goto _again - f3: - // line 33 "raw_scan.rl" - - numIdx = 0 - constraint = rawConstraint{} - - // line 38 "raw_scan.rl" - - ts = p - - // line 42 "raw_scan.rl" - - te = p - constraint.op = data[ts:p] - - // line 47 "raw_scan.rl" - - te = p - constraint.sep = data[ts:p] - - goto _again - - _again: - if cs == 0 { - goto _out - } - if p++; p != pe { - goto _resume - } - _test_eof: - { - } - if p == eof { - switch _scan_eof_actions[cs] { - case 9: 
- // line 71 "raw_scan.rl" - - extra = data[p:] - - case 7: - // line 47 "raw_scan.rl" - - te = p - constraint.sep = data[ts:p] - - // line 71 "raw_scan.rl" - - extra = data[p:] - - case 11: - // line 52 "raw_scan.rl" - - te = p - constraint.numCt++ - if numIdx < len(constraint.nums) { - constraint.nums[numIdx] = data[ts:p] - numIdx++ - } - - // line 71 "raw_scan.rl" - - extra = data[p:] - - case 15: - // line 61 "raw_scan.rl" - - te = p - constraint.pre = data[ts+1 : p] - - // line 71 "raw_scan.rl" - - extra = data[p:] - - case 14: - // line 66 "raw_scan.rl" - - te = p - constraint.meta = data[ts+1 : p] - - // line 71 "raw_scan.rl" - - extra = data[p:] - - case 1: - // line 42 "raw_scan.rl" - - te = p - constraint.op = data[ts:p] - - // line 47 "raw_scan.rl" - - te = p - constraint.sep = data[ts:p] - - // line 71 "raw_scan.rl" - - extra = data[p:] - - // line 610 "raw_scan.go" - } - } - - _out: - { - } - } - - // line 92 "raw_scan.rl" - - return constraint, extra -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/constraints/raw_scan.rl b/vendor/github.com/apparentlymart/go-versions/versions/constraints/raw_scan.rl deleted file mode 100644 index da2151da..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/constraints/raw_scan.rl +++ /dev/null @@ -1,95 +0,0 @@ -// This file is generated from raw_scan.rl. DO NOT EDIT. -%%{ - # (except you are actually in raw_scan.rl here, so edit away!) - machine scan; -}%% - -package constraints - -%%{ - write data; -}%% - -func scanConstraint(data string) (rawConstraint, string) { - var constraint rawConstraint - var numIdx int - var extra string - - // Ragel state - p := 0 // "Pointer" into data - pe := len(data) // End-of-data "pointer" - cs := 0 // constraint state (will be initialized by ragel-generated code) - ts := 0 - te := 0 - eof := pe - - // Keep Go compiler happy even if generated code doesn't use these - _ = ts - _ = te - _ = eof - - %%{ - - action enterConstraint { - numIdx = 0 - constraint = rawConstraint{} - } - - action ts { - ts = p - } - - action finishOp { - te = p - constraint.op = data[ts:p] - } - - action finishSep { - te = p - constraint.sep = data[ts:p] - } - - action finishNum { - te = p - constraint.numCt++ - if numIdx < len(constraint.nums) { - constraint.nums[numIdx] = data[ts:p] - numIdx++ - } - } - - action finishPre { - te = p - constraint.pre = data[ts+1:p] - } - - action finishMeta { - te = p - constraint.meta = data[ts+1:p] - } - - action finishExtra { - extra = data[p:] - } - - num = (digit+ | '*' | 'x' | 'X') >ts %finishNum %err(finishNum) %eof(finishNum); - - op = ((any - (digit | space | alpha | '.' | '*'))**) >ts %finishOp %err(finishOp) %eof(finishOp); - likelyOp = ('^' | '>' | '<' | '-' | '~' | '!'); - sep = (space**) >ts %finishSep %err(finishSep) %eof(finishSep); - nums = (num ('.' num)*); - extraStr = (alnum | '.' | '-')+; - pre = ('-' extraStr) >ts %finishPre %err(finishPre) %eof(finishPre); - meta = ('+' extraStr) >ts %finishMeta %err(finishMeta) %eof(finishMeta); - - constraint = (op sep nums pre? meta?) 
>enterConstraint; - - main := (constraint) @/finishExtra %/finishExtra $!finishExtra; - - write init; - write exec; - - }%% - - return constraint, extra -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/constraints/ruby_style.go b/vendor/github.com/apparentlymart/go-versions/versions/constraints/ruby_style.go deleted file mode 100644 index 6e9fc354..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/constraints/ruby_style.go +++ /dev/null @@ -1,181 +0,0 @@ -package constraints - -import ( - "fmt" - "strings" -) - -// ParseRubyStyle parses a single selection constraint using a syntax similar -// to that used by rubygems and other Ruby tools. -// -// Exact compatibility with rubygems is not guaranteed; "ruby-style" here -// just means that users familiar with rubygems should find familiar the choice -// of operators and their meanings. -// -// ParseRubyStyle parses only a single specification, mimicking the usual -// rubygems approach of providing each selection as a separate string. -// The result can be combined with other results to create an IntersectionSpec -// that describes the effect of multiple such constraints. -func ParseRubyStyle(str string) (SelectionSpec, error) { - if strings.TrimSpace(str) == "" { - return SelectionSpec{}, fmt.Errorf("empty specification") - } - spec, remain, err := parseRubyStyle(str) - if err != nil { - return spec, err - } - if remain != "" { - remain = strings.TrimSpace(remain) - switch { - case remain == "": - return spec, fmt.Errorf("extraneous spaces at end of specification") - case strings.HasPrefix(remain, "v"): - // User seems to be trying to use a "v" prefix, like "v1.0.0" - return spec, fmt.Errorf(`a "v" prefix should not be used`) - case strings.HasPrefix(remain, "||") || strings.HasPrefix(remain, ","): - // User seems to be trying to specify multiple constraints - return spec, fmt.Errorf(`only one constraint may be specified`) - case strings.HasPrefix(remain, "-"): - // User seems to be trying to use npm-style range constraints - return spec, fmt.Errorf(`range constraints are not supported`) - default: - return spec, fmt.Errorf("invalid characters %q", remain) - } - } - - return spec, nil -} - -// ParseRubyStyleAll is a helper wrapper around ParseRubyStyle that accepts -// multiple selection strings and combines them together into a single -// IntersectionSpec. -func ParseRubyStyleAll(strs ...string) (IntersectionSpec, error) { - spec := make(IntersectionSpec, 0, len(strs)) - for _, str := range strs { - subSpec, err := ParseRubyStyle(str) - if err != nil { - return nil, fmt.Errorf("invalid specification %q: %s", str, err) - } - spec = append(spec, subSpec) - } - return spec, nil -} - -// ParseRubyStyleMulti is similar to ParseRubyStyle, but rather than parsing -// only a single selection specification it instead expects one or more -// comma-separated specifications, returning the result as an -// IntersectionSpec. 
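
A usage sketch may help here; this example is illustrative only (not part of the vendored file) and assumes the import path used elsewhere in this module:

package main

import (
	"fmt"

	"github.com/apparentlymart/go-versions/versions/constraints"
)

func main() {
	// A single ruby-style constraint. With three numbered segments,
	// "~>" becomes the patch-only pessimistic operator.
	spec, err := constraints.ParseRubyStyle("~> 1.2.3")
	if err != nil {
		panic(err)
	}
	fmt.Println(spec.Operator == constraints.OpGreaterThanOrEqualPatchOnly) // true

	// Comma-separated constraints combine into an IntersectionSpec.
	multi, err := constraints.ParseRubyStyleMulti(">= 1.2, < 2.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(multi)) // 2: IntersectionSpec is a []SelectionSpec
}
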
-func ParseRubyStyleMulti(str string) (IntersectionSpec, error) { - var spec IntersectionSpec - remain := strings.TrimSpace(str) - for remain != "" { - if strings.TrimSpace(remain) == "" { - break - } - - var subSpec SelectionSpec - var err error - var newRemain string - subSpec, newRemain, err = parseRubyStyle(remain) - consumed := remain[:len(remain)-len(newRemain)] - if err != nil { - return nil, fmt.Errorf("invalid specification %q: %s", consumed, err) - } - remain = strings.TrimSpace(newRemain) - - if remain != "" { - if !strings.HasPrefix(remain, ",") { - return nil, fmt.Errorf("missing comma after %q", consumed) - } - // Eat the separator comma - remain = strings.TrimSpace(remain[1:]) - } - - spec = append(spec, subSpec) - } - - return spec, nil -} - -// parseRubyStyle parses a ruby-style constraint from the prefix of the given -// string and returns the remaining unconsumed string for the caller to use -// for further processing. -func parseRubyStyle(str string) (SelectionSpec, string, error) { - raw, remain := scanConstraint(str) - var spec SelectionSpec - - switch raw.op { - case "=", "": - spec.Operator = OpEqual - case "!=": - spec.Operator = OpNotEqual - case ">": - spec.Operator = OpGreaterThan - case ">=": - spec.Operator = OpGreaterThanOrEqual - case "<": - spec.Operator = OpLessThan - case "<=": - spec.Operator = OpLessThanOrEqual - case "~>": - // Ruby-style pessimistic can be either a minor-only or patch-only - // constraint, depending on how many digits were given. - switch raw.numCt { - case 3: - spec.Operator = OpGreaterThanOrEqualPatchOnly - default: - spec.Operator = OpGreaterThanOrEqualMinorOnly - } - case "=<": - return spec, remain, fmt.Errorf("invalid constraint operator %q; did you mean \"<=\"?", raw.op) - case "=>": - return spec, remain, fmt.Errorf("invalid constraint operator %q; did you mean \">=\"?", raw.op) - default: - return spec, remain, fmt.Errorf("invalid constraint operator %q", raw.op) - } - - switch raw.sep { - case "": - // No separator is always okay. Although all of the examples in the - // rubygems docs show a space separator, the parser doesn't actually - // require it. - case " ": - if raw.op == "" { - return spec, remain, fmt.Errorf("extraneous spaces at start of specification") - } - default: - if raw.op == "" { - return spec, remain, fmt.Errorf("extraneous spaces at start of specification") - } else { - return spec, remain, fmt.Errorf("only one space is expected after the operator %q", raw.op) - } - } - - if raw.numCt > 3 { - return spec, remain, fmt.Errorf("too many numbered portions; only three are allowed (major, minor, patch)") - } - - // Ruby-style doesn't use explicit wildcards - for i, s := range raw.nums { - switch { - case isWildcardNum(s): - // Can't use wildcards in an exact specification - return spec, remain, fmt.Errorf("can't use wildcard for %s number; omit segments that should be unconstrained", rawNumNames[i]) - } - } - - if raw.pre != "" || raw.meta != "" { - // If either the prerelease or meta portions are set then any unconstrained - // segments are implied to be zero in order to guarantee constraint - // consistency. 
- for i, s := range raw.nums { - if s == "" { - raw.nums[i] = "0" - } - } - } - - spec.Boundary = raw.VersionSpec() - - return spec, remain, nil -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/constraints/selectionop_string.go b/vendor/github.com/apparentlymart/go-versions/versions/constraints/selectionop_string.go deleted file mode 100644 index e3c2b129..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/constraints/selectionop_string.go +++ /dev/null @@ -1,43 +0,0 @@ -// Code generated by "stringer -type SelectionOp"; DO NOT EDIT. - -package constraints - -import "strconv" - -const ( - _SelectionOp_name_0 = "OpUnconstrained" - _SelectionOp_name_1 = "OpMatch" - _SelectionOp_name_2 = "OpLessThanOpEqualOpGreaterThan" - _SelectionOp_name_3 = "OpGreaterThanOrEqualMinorOnly" - _SelectionOp_name_4 = "OpGreaterThanOrEqualPatchOnly" - _SelectionOp_name_5 = "OpNotEqual" - _SelectionOp_name_6 = "OpLessThanOrEqualOpGreaterThanOrEqual" -) - -var ( - _SelectionOp_index_2 = [...]uint8{0, 10, 17, 30} - _SelectionOp_index_6 = [...]uint8{0, 17, 37} -) - -func (i SelectionOp) String() string { - switch { - case i == 0: - return _SelectionOp_name_0 - case i == 42: - return _SelectionOp_name_1 - case 60 <= i && i <= 62: - i -= 60 - return _SelectionOp_name_2[_SelectionOp_index_2[i]:_SelectionOp_index_2[i+1]] - case i == 94: - return _SelectionOp_name_3 - case i == 126: - return _SelectionOp_name_4 - case i == 8800: - return _SelectionOp_name_5 - case 8804 <= i && i <= 8805: - i -= 8804 - return _SelectionOp_name_6[_SelectionOp_index_6[i]:_SelectionOp_index_6[i+1]] - default: - return "SelectionOp(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/constraints/spec.go b/vendor/github.com/apparentlymart/go-versions/versions/constraints/spec.go deleted file mode 100644 index 2cce0455..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/constraints/spec.go +++ /dev/null @@ -1,249 +0,0 @@ -package constraints - -import ( - "bytes" - "fmt" - "strconv" -) - -// Spec is an interface type that UnionSpec, IntersectionSpec, SelectionSpec, -// and VersionSpec all belong to. -// -// It's provided to allow generic code to be written that accepts and operates -// on all specs, but such code must still handle each type separately using -// e.g. a type switch. This is a closed type that will not have any new -// implementations added in future. -type Spec interface { - isSpec() -} - -// UnionSpec represents an "or" operation on nested version constraints. -// -// This is not directly representable in all of our supported constraint -// syntaxes. -type UnionSpec []IntersectionSpec - -func (s UnionSpec) isSpec() {} - -// IntersectionSpec represents an "and" operation on nested version constraints. -type IntersectionSpec []SelectionSpec - -func (s IntersectionSpec) isSpec() {} - -// SelectionSpec represents applying a single operator to a particular -// "boundary" version. -type SelectionSpec struct { - Boundary VersionSpec - Operator SelectionOp -} - -func (s SelectionSpec) isSpec() {} - -// VersionSpec represents the boundary within a SelectionSpec. -type VersionSpec struct { - Major NumConstraint - Minor NumConstraint - Patch NumConstraint - Prerelease string - Metadata string -} - -func (s VersionSpec) isSpec() {} - -// IsExact returns bool if all of the version numbers in the receiver are -// fully-constrained. 
This is the same as s.ConstraintDepth() == ConstrainedPatch -func (s VersionSpec) IsExact() bool { - return s.ConstraintDepth() == ConstrainedPatch -} - -// ConstraintDepth returns the constraint depth of the receiver, which is -// the most specifc version number segment that is exactly constrained. -// -// The constraints must be consistent, which means that if a given segment -// is unconstrained then all of the deeper segments must also be unconstrained. -// If not, this method will panic. Version specs produced by the parsers in -// this package are guaranteed to be consistent. -func (s VersionSpec) ConstraintDepth() ConstraintDepth { - if s == (VersionSpec{}) { - // zero value is a degenerate case meaning completely unconstrained - return Unconstrained - } - - switch { - case s.Major.Unconstrained: - if !(s.Minor.Unconstrained && s.Patch.Unconstrained && s.Prerelease == "" && s.Metadata == "") { - panic("inconsistent constraint depth") - } - return Unconstrained - case s.Minor.Unconstrained: - if !(s.Patch.Unconstrained && s.Prerelease == "" && s.Metadata == "") { - panic("inconsistent constraint depth") - } - return ConstrainedMajor - case s.Patch.Unconstrained: - if s.Prerelease != "" || s.Metadata != "" { - panic(fmt.Errorf("inconsistent constraint depth: wildcard major, minor and patch followed by prerelease %q and metadata %q", s.Prerelease, s.Metadata)) - } - return ConstrainedMinor - default: - return ConstrainedPatch - } -} - -// ConstraintBounds returns two exact VersionSpecs that represent the upper -// and lower bounds of the possibly-inexact receiver. If the receiver -// is already exact then the two bounds are identical and have operator -// OpEqual. If they are different then the lower bound is OpGreaterThanOrEqual -// and the upper bound is OpLessThan. -// -// As a special case, if the version spec is entirely unconstrained the -// two bounds will be identical and the zero value of SelectionSpec. For -// consistency, this result is also returned if the receiver is already -// the zero value of VersionSpec, since a zero spec represents a lack of -// constraint. -// -// The constraints must be consistent as defined by ConstraintDepth, or this -// method will panic. -func (s VersionSpec) ConstraintBounds() (SelectionSpec, SelectionSpec) { - switch s.ConstraintDepth() { - case Unconstrained: - return SelectionSpec{}, SelectionSpec{} - case ConstrainedMajor: - lowerBound := s.ConstrainToZero() - lowerBound.Metadata = "" - upperBound := lowerBound - upperBound.Major.Num++ - upperBound.Minor.Num = 0 - upperBound.Patch.Num = 0 - upperBound.Prerelease = "" - upperBound.Metadata = "" - return SelectionSpec{ - Operator: OpGreaterThanOrEqual, - Boundary: lowerBound, - }, SelectionSpec{ - Operator: OpLessThan, - Boundary: upperBound, - } - case ConstrainedMinor: - lowerBound := s.ConstrainToZero() - lowerBound.Metadata = "" - upperBound := lowerBound - upperBound.Minor.Num++ - upperBound.Patch.Num = 0 - upperBound.Metadata = "" - return SelectionSpec{ - Operator: OpGreaterThanOrEqual, - Boundary: lowerBound, - }, SelectionSpec{ - Operator: OpLessThan, - Boundary: upperBound, - } - default: - eq := SelectionSpec{ - Operator: OpEqual, - Boundary: s, - } - return eq, eq - } -} - -// ConstrainToZero returns a copy of the receiver with all of its -// unconstrained numeric segments constrained to zero. 
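
To make the bound arithmetic above concrete, here is a sketch (illustrative; it builds a VersionSpec by hand, which is safe only because the value below satisfies the consistency rules that ConstraintDepth requires):

package main

import (
	"fmt"

	"github.com/apparentlymart/go-versions/versions/constraints"
)

func main() {
	// The spec a parser would produce for "1.2": patch left unconstrained.
	vs := constraints.VersionSpec{
		Major: constraints.NumConstraint{Num: 1},
		Minor: constraints.NumConstraint{Num: 2},
		Patch: constraints.NumConstraint{Unconstrained: true},
	}

	// ConstrainedMinor depth, so the bounds are >= 1.2.0 and < 1.3.0.
	lower, upper := vs.ConstraintBounds()
	fmt.Println(lower.Operator == constraints.OpGreaterThanOrEqual, lower.Boundary) // true 1.2.0
	fmt.Println(upper.Operator == constraints.OpLessThan, upper.Boundary)           // true 1.3.0
}
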
-func (s VersionSpec) ConstrainToZero() VersionSpec { - switch s.ConstraintDepth() { - case Unconstrained: - s.Major = NumConstraint{Num: 0} - s.Minor = NumConstraint{Num: 0} - s.Patch = NumConstraint{Num: 0} - s.Prerelease = "" - s.Metadata = "" - case ConstrainedMajor: - s.Minor = NumConstraint{Num: 0} - s.Patch = NumConstraint{Num: 0} - s.Prerelease = "" - s.Metadata = "" - case ConstrainedMinor: - s.Patch = NumConstraint{Num: 0} - s.Prerelease = "" - s.Metadata = "" - } - return s -} - -// ConstrainToUpperBound returns a copy of the receiver with all of its -// unconstrained numeric segments constrained to zero and its last -// constrained segment increased by one. -// -// This operation is not meaningful for an entirely unconstrained VersionSpec, -// so will return the zero value of the type in that case. -func (s VersionSpec) ConstrainToUpperBound() VersionSpec { - switch s.ConstraintDepth() { - case Unconstrained: - return VersionSpec{} - case ConstrainedMajor: - s.Major.Num++ - s.Minor = NumConstraint{Num: 0} - s.Patch = NumConstraint{Num: 0} - s.Prerelease = "" - s.Metadata = "" - case ConstrainedMinor: - s.Minor.Num++ - s.Patch = NumConstraint{Num: 0} - s.Prerelease = "" - s.Metadata = "" - } - return s -} - -func (s VersionSpec) String() string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "%s.%s.%s", s.Major, s.Minor, s.Patch) - if s.Prerelease != "" { - fmt.Fprintf(&buf, "-%s", s.Prerelease) - } - if s.Metadata != "" { - fmt.Fprintf(&buf, "+%s", s.Metadata) - } - return buf.String() -} - -type SelectionOp rune - -//go:generate stringer -type SelectionOp - -const ( - OpUnconstrained SelectionOp = 0 - OpGreaterThan SelectionOp = '>' - OpLessThan SelectionOp = '<' - OpGreaterThanOrEqual SelectionOp = '≥' - OpGreaterThanOrEqualPatchOnly SelectionOp = '~' - OpGreaterThanOrEqualMinorOnly SelectionOp = '^' - OpLessThanOrEqual SelectionOp = '≤' - OpEqual SelectionOp = '=' - OpNotEqual SelectionOp = '≠' - OpMatch SelectionOp = '*' -) - -type NumConstraint struct { - Num uint64 - Unconstrained bool -} - -func (c NumConstraint) String() string { - if c.Unconstrained { - return "*" - } else { - return strconv.FormatUint(c.Num, 10) - } -} - -type ConstraintDepth int - -//go:generate stringer -type ConstraintDepth - -const ( - Unconstrained ConstraintDepth = 0 - ConstrainedMajor ConstraintDepth = 1 - ConstrainedMinor ConstraintDepth = 2 - ConstrainedPatch ConstraintDepth = 3 -) diff --git a/vendor/github.com/apparentlymart/go-versions/versions/constraints/version.go b/vendor/github.com/apparentlymart/go-versions/versions/constraints/version.go deleted file mode 100644 index 9e6f24ea..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/constraints/version.go +++ /dev/null @@ -1,81 +0,0 @@ -package constraints - -import ( - "fmt" - "strings" -) - -// ParseExactVersion parses a string that must contain the specification of a -// single, exact version, and then returns it as a VersionSpec. -// -// This is primarily here to allow versions.ParseVersion to re-use the -// constraint grammar, and isn't very useful for direct use from calling -// applications. 
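
For instance (an illustrative sketch against the function below):

package main

import (
	"fmt"

	"github.com/apparentlymart/go-versions/versions/constraints"
)

func main() {
	spec, err := constraints.ParseExactVersion("1.2.3")
	if err != nil {
		panic(err)
	}
	fmt.Println(spec.IsExact()) // true: major, minor and patch are all constrained

	// Constraint operators are rejected with a specialized error.
	_, err = constraints.ParseExactVersion(">= 1.2.3")
	fmt.Println(err != nil) // true: an exact version is required
}
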
-func ParseExactVersion(vs string) (VersionSpec, error) { - spec := VersionSpec{} - - if strings.TrimSpace(vs) == "" { - return spec, fmt.Errorf("empty specification") - } - - raw, remain := scanConstraint(vs) - - switch strings.TrimSpace(raw.op) { - case ">", ">=", "<", "<=", "!", "!=", "~>", "^", "~": - // If it looks like the user was trying to write a constraint string - // then we'll help them out with a more specialized error. - return spec, fmt.Errorf("can't use constraint operator %q; an exact version is required", raw.op) - case "": - // Empty operator is okay as long as we don't also have separator spaces. - // (Caller can trim off spaces beforehand if they want to tolerate this.) - if raw.sep != "" { - return spec, fmt.Errorf("extraneous spaces at start of specification") - } - default: - return spec, fmt.Errorf("invalid sequence %q at start of specification", raw.op) - } - - if remain != "" { - remain = strings.TrimSpace(remain) - switch { - case remain == "": - return spec, fmt.Errorf("extraneous spaces at end of specification") - case strings.HasPrefix(vs, "v"): - // User seems to be trying to use a "v" prefix, like "v1.0.0" - return spec, fmt.Errorf(`a "v" prefix should not be used`) - case strings.HasPrefix(remain, ",") || strings.HasPrefix(remain, "|"): - // User seems to be trying to list/combine multiple versions - return spec, fmt.Errorf("can't specify multiple versions; a single exact version is required") - case strings.HasPrefix(remain, "-"): - // User seems to be trying to use the npm-style range operator - return spec, fmt.Errorf("can't specify version range; a single exact version is required") - case strings.HasPrefix(strings.TrimSpace(vs), remain): - // Whole string is invalid, then. - return spec, fmt.Errorf("invalid specification; required format is three positive integers separated by periods") - default: - return spec, fmt.Errorf("invalid characters %q", remain) - } - } - - if raw.numCt > 3 { - return spec, fmt.Errorf("too many numbered portions; only three are allowed (major, minor, patch)") - } - - for i := raw.numCt; i < len(raw.nums); i++ { - raw.nums[i] = "0" - } - - for i, s := range raw.nums { - switch { - case isWildcardNum(s): - // Can't use wildcards in an exact specification - return spec, fmt.Errorf("can't use wildcard for %s number; an exact version is required", rawNumNames[i]) - } - } - - // Since we eliminated all of the unconstrained cases above, either by normalizing - // or returning an error, we are guaranteed to get constrained numbers here. - spec = raw.VersionSpec() - - return spec, nil -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/doc.go b/vendor/github.com/apparentlymart/go-versions/versions/doc.go deleted file mode 100644 index 6e4ffe52..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -// Package versions is a library for wrangling version numbers in Go. -// -// There are many libraries offering some or all of this functionality. -// This package aims to distinguish itself by offering a more convenient and -// ergonomic API than seen in some other libraries. Code that is resolving -// versions and version constraints tends to be hairy and complex already, so -// an expressive API for talking about these concepts will hopefully help to -// make that code more readable. -// -// The version model is based on Semantic Versioning as defined at -// https://semver.org/ . 
Semantic Versioning does not include any specification -// for constraints, so the constraint model is based on that used by rubygems, -// allowing for upper and lower bounds as well as individual version exclusions. -package versions diff --git a/vendor/github.com/apparentlymart/go-versions/versions/list.go b/vendor/github.com/apparentlymart/go-versions/versions/list.go deleted file mode 100644 index 083e6858..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/list.go +++ /dev/null @@ -1,149 +0,0 @@ -package versions - -import ( - "sort" -) - -// List is a slice of Version that implements sort.Interface, and also includes -// some other helper functions. -type List []Version - -// Filter removes from the receiver any elements that are not in the given -// set, moving retained elements to lower indices to close any gaps and -// modifying the underlying array in-place. The return value is a slice -// describing the new bounds within the existing backing array. The relative -// ordering of the retained elements is preserved. -// -// The result must always be either the same length or shorter than the -// initial value, so no allocation is required. -// -// As a special case, if the result would be a slice of length zero then a -// nil slice is returned instead, leaving the backing array untouched. -func (l List) Filter(set Set) List { - writeI := 0 - - for readI := range l { - if set.Has(l[readI]) { - l[writeI] = l[readI] - writeI++ - } - } - - if writeI == 0 { - return nil - } - return l[:writeI:len(l)] -} - -// Newest returns the newest version in the list, or Unspecified if the list -// is empty. -// -// Since build metadata does not participate in precedence, it is possible -// that a given list may have multiple equally-new versions; in that case -// Newest will return an arbitrary version from that subset. -func (l List) Newest() Version { - ret := Unspecified - for i := len(l) - 1; i >= 0; i-- { - if l[i].GreaterThan(ret) { - ret = l[i] - } - } - return ret -} - -// NewestInSet is like Filter followed by Newest, except that it does not -// modify the underlying array. This is convenient for the common case of -// selecting the newest version from a set derived from a user-supplied -// constraint. -// -// Similar to Newest, the result is Unspecified if the list is empty or if -// none of the items are in the given set. Also similar to newest, if there -// are multiple newest versions (possibly differentiated only by metadata) -// then one is arbitrarily chosen. -func (l List) NewestInSet(set Set) Version { - ret := Unspecified - for i := len(l) - 1; i >= 0; i-- { - if l[i].GreaterThan(ret) && set.Has(l[i]) { - ret = l[i] - } - } - return ret -} - -// NewestList returns a List containing all of the list items that have the -// highest precedence. -// -// For an already-sorted list, the returned slice is a sub-slice of the -// receiver, sharing the same backing array. For an unsorted list, a new -// array is allocated for the result. For an empty list, the result is always -// nil. -// -// Relative ordering of elements in the receiver is preserved in the output. -func (l List) NewestList() List { - if len(l) == 0 { - return nil - } - - if l.IsSorted() { - // This is a happy path since we can just count off items from the - // end of our existing list until we find one that is not the same - // as the last. 
- var i int - n := len(l) - for i = n - 1; i >= 0; i-- { - if !l[i].Same(l[n-1]) { - break - } - } - if i < 0 { - i = 0 - } - return l[i:] - } - - // For an unsorted list we'll allocate so that we can construct a new, - // filtered slice. - ret := make(List, 0, 1) // one item is the common case, in the absense of build metadata - example := l.Newest() - for _, v := range l { - if v.Same(example) { - ret = append(ret, v) - } - } - return ret -} - -// Set returns a finite Set containing the versions in the receiver. -// -// Although it is possible to recover a list from the return value using -// its List method, the result may be in a different order and will have -// any duplicate elements from the receiving list consolidated. -func (l List) Set() Set { - return Selection(l...) -} - -func (l List) Len() int { - return len(l) -} - -func (l List) Less(i, j int) bool { - return l[i].LessThan(l[j]) -} - -func (l List) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -// Sort applies an in-place sort on the list, preserving the relative order of -// any elements that differ only in build metadata. Earlier versions sort -// first, so the newest versions will be at the highest indices in the list -// once this method returns. -func (l List) Sort() { - sort.Stable(l) -} - -// IsSorted returns true if the list is already in ascending order by -// version priority. -func (l List) IsSorted() bool { - return sort.IsSorted(l) -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/parse.go b/vendor/github.com/apparentlymart/go-versions/versions/parse.go deleted file mode 100644 index 66150e33..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/parse.go +++ /dev/null @@ -1,243 +0,0 @@ -package versions - -import ( - "fmt" - - "github.com/apparentlymart/go-versions/versions/constraints" -) - -// ParseVersion attempts to parse the given string as a semantic version -// specification, and returns the result if successful. -// -// If the given string is not parseable then an error is returned that is -// suitable for display directly to a hypothetical end-user that provided this -// version string, as long as they can read English. -func ParseVersion(s string) (Version, error) { - spec, err := constraints.ParseExactVersion(s) - if err != nil { - return Unspecified, err - } - return versionFromExactVersionSpec(spec), nil -} - -// MustParseVersion is the same as ParseVersion except that it will panic -// instead of returning an error. -func MustParseVersion(s string) Version { - v, err := ParseVersion(s) - if err != nil { - panic(err) - } - return v -} - -// MeetingConstraints returns a version set that contains all of the versions -// that meet the given constraints, specified using the Spec type from the -// constraints package. -// -// The resulting Set has all pre-release versions excluded, except any that -// are explicitly mentioned as exact selections. For example, the constraint -// "2.0.0-beta1 || >2" contains 2.0.0-beta1 but not 2.0.0-beta2 or 3.0.0-beta1. -// This additional constraint on pre-releases can be avoided by calling -// MeetingConstraintsExact instead, at which point the caller can apply other -// logic to deal with prereleases. -// -// This function expects an internally-consistent Spec like what would be -// generated by that package's constraint parsers. Behavior is undefined -- -// including the possibility of panics -- if specs are hand-created and the -// expected invariants aren't met. 
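
The pre-release behaviour described above can be exercised with the comment's own example. This sketch is illustrative only and assumes the canonical parser behind MeetingConstraintsString accepts the "||" union syntax used in that comment:

package main

import (
	"fmt"

	"github.com/apparentlymart/go-versions/versions"
)

func main() {
	set := versions.MustMakeSet(versions.MeetingConstraintsString("2.0.0-beta1 || >2"))

	fmt.Println(set.Has(versions.MustParseVersion("2.0.0-beta1"))) // true: explicitly requested
	fmt.Println(set.Has(versions.MustParseVersion("2.0.0-beta2"))) // false: unrequested pre-release
	fmt.Println(set.Has(versions.MustParseVersion("3.0.0-beta1"))) // false: unrequested pre-release
}
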
-func MeetingConstraints(spec constraints.Spec) Set { - exact := MeetingConstraintsExact(spec) - reqd := exact.AllRequested().List() - set := Intersection(Released, exact) - reqd = reqd.Filter(Prerelease) - if len(reqd) != 0 { - set = Union(Selection(reqd...), set) - } - return set -} - -// MeetingConstraintsExact is like MeetingConstraints except that it doesn't -// apply the extra rules to exclude pre-release versions that are not -// explicitly requested. -// -// This means that given a constraint ">=1.0.0 <2.0.0" a hypothetical version -// 2.0.0-beta1 _is_ in the returned set, because prerelease versions have -// lower precedence than their corresponding release. -// -// A caller can use this to implement its own specialized handling of -// pre-release versions by applying additional set operations to the result, -// such as intersecting it with the predefined set versions.Released to -// remove prerelease versions altogether. -func MeetingConstraintsExact(spec constraints.Spec) Set { - if spec == nil { - return All - } - - switch ts := spec.(type) { - - case constraints.VersionSpec: - lowerBound, upperBound := ts.ConstraintBounds() - switch lowerBound.Operator { - case constraints.OpUnconstrained: - return All - case constraints.OpEqual: - return Only(versionFromExactVersionSpec(lowerBound.Boundary)) - default: - return AtLeast( - versionFromExactVersionSpec(lowerBound.Boundary), - ).Intersection( - OlderThan(versionFromExactVersionSpec(upperBound.Boundary))) - } - - case constraints.SelectionSpec: - lower := ts.Boundary.ConstrainToZero() - if ts.Operator != constraints.OpEqual && ts.Operator != constraints.OpNotEqual { - lower.Metadata = "" // metadata is only considered for exact matches - } - - switch ts.Operator { - case constraints.OpUnconstrained: - // Degenerate case, but we'll allow it. - return All - case constraints.OpMatch: - // The match operator uses the constraints implied by the - // Boundary version spec as the specification. - // Note that we discard "lower" in this case, because we do want - // to match our metadata if it's specified. - return MeetingConstraintsExact(ts.Boundary) - case constraints.OpEqual, constraints.OpNotEqual: - set := Only(versionFromExactVersionSpec(lower)) - if ts.Operator == constraints.OpNotEqual { - // We want everything _except_ what's in our set, then. 
- set = All.Subtract(set) - } - return set - case constraints.OpGreaterThan: - return NewerThan(versionFromExactVersionSpec(lower)) - case constraints.OpGreaterThanOrEqual: - return AtLeast(versionFromExactVersionSpec(lower)) - case constraints.OpLessThan: - return OlderThan(versionFromExactVersionSpec(lower)) - case constraints.OpLessThanOrEqual: - return AtMost(versionFromExactVersionSpec(lower)) - case constraints.OpGreaterThanOrEqualMinorOnly: - upper := lower - upper.Major.Num++ - upper.Minor.Num = 0 - upper.Patch.Num = 0 - upper.Prerelease = "" - return AtLeast( - versionFromExactVersionSpec(lower), - ).Intersection( - OlderThan(versionFromExactVersionSpec(upper))) - case constraints.OpGreaterThanOrEqualPatchOnly: - upper := lower - upper.Minor.Num++ - upper.Patch.Num = 0 - upper.Prerelease = "" - return AtLeast( - versionFromExactVersionSpec(lower), - ).Intersection( - OlderThan(versionFromExactVersionSpec(upper))) - default: - panic(fmt.Errorf("unsupported constraints.SelectionOp %s", ts.Operator)) - } - - case constraints.UnionSpec: - if len(ts) == 0 { - return All - } - if len(ts) == 1 { - return MeetingConstraintsExact(ts[0]) - } - union := make(setUnion, len(ts)) - for i, subSpec := range ts { - union[i] = MeetingConstraintsExact(subSpec).setI - } - return Set{setI: union} - - case constraints.IntersectionSpec: - if len(ts) == 0 { - return All - } - if len(ts) == 1 { - return MeetingConstraintsExact(ts[0]) - } - intersection := make(setIntersection, len(ts)) - for i, subSpec := range ts { - intersection[i] = MeetingConstraintsExact(subSpec).setI - } - return Set{setI: intersection} - - default: - // should never happen because the above cases are exhaustive for - // all valid constraint implementations. - panic(fmt.Errorf("unsupported constraints.Spec implementation %T", spec)) - } -} - -// MeetingConstraintsString attempts to parse the given spec as a constraints -// string in our canonical format, which is most similar to the syntax used by -// npm, Go's "dep" tool, Rust's "cargo", etc. -// -// This is a covenience wrapper around calling constraints.Parse and then -// passing the result to MeetingConstraints. Call into the constraints package -// yourself for access to the constraint tree. -// -// If unsuccessful, the error from the underlying parser is returned verbatim. -// Parser errors are suitable for showing to an end-user in situations where -// the given spec came from user input. -func MeetingConstraintsString(spec string) (Set, error) { - s, err := constraints.Parse(spec) - if err != nil { - return None, err - } - return MeetingConstraints(s), nil -} - -// MeetingConstraintsStringRuby attempts to parse the given spec as a -// "Ruby-style" version constraint string, and returns the set of versions -// that match the constraint if successful. -// -// If unsuccessful, the error from the underlying parser is returned verbatim. -// Parser errors are suitable for showing to an end-user in situations where -// the given spec came from user input. -// -// "Ruby-style" here is not a promise of exact compatibility with rubygems -// or any other Ruby tools. Rather, it refers to this parser using a syntax -// that is intended to feel familiar to those who are familiar with rubygems -// syntax. -// -// Constraints are parsed in "multi" mode, allowing multiple comma-separated -// constraints that are combined with the Intersection operator. For more -// control over the parsing process, use the constraints package API directly -// and then call MeetingConstraints. 
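
For example (illustrative only):

package main

import (
	"fmt"

	"github.com/apparentlymart/go-versions/versions"
)

func main() {
	// "~> 1.0" permits 1.x only; ">= 1.0.4" raises the lower bound,
	// so the intersection is the half-open range [1.0.4, 2.0.0).
	set, err := versions.MeetingConstraintsStringRuby("~> 1.0, >= 1.0.4")
	if err != nil {
		panic(err)
	}
	fmt.Println(set.Has(versions.MustParseVersion("1.0.5"))) // true
	fmt.Println(set.Has(versions.MustParseVersion("2.0.0"))) // false
}
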
-func MeetingConstraintsStringRuby(spec string) (Set, error) { - s, err := constraints.ParseRubyStyleMulti(spec) - if err != nil { - return None, err - } - return MeetingConstraints(s), nil -} - -// MustMakeSet can be used to wrap any function that returns a set and an error -// to make it panic if an error occurs and return the set otherwise. -// -// This is intended for tests and other situations where input is from -// known-good constants. -func MustMakeSet(set Set, err error) Set { - if err != nil { - panic(err) - } - return set -} - -func versionFromExactVersionSpec(spec constraints.VersionSpec) Version { - return Version{ - Major: spec.Major.Num, - Minor: spec.Minor.Num, - Patch: spec.Patch.Num, - Prerelease: VersionExtra(spec.Prerelease), - Metadata: VersionExtra(spec.Metadata), - } -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/set.go b/vendor/github.com/apparentlymart/go-versions/versions/set.go deleted file mode 100644 index ad4d5efe..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/set.go +++ /dev/null @@ -1,89 +0,0 @@ -package versions - -// Set is a set of versions, usually created by parsing a constraint string. -type Set struct { - setI -} - -// setI is the private interface implemented by our various constraint -// operators. -type setI interface { - Has(v Version) bool - AllRequested() Set - GoString() string -} - -// Has returns true if the given version is a member of the receiving set. -func (s Set) Has(v Version) bool { - // The special Unspecified version is excluded as soon as any sort of - // constraint is applied, and so the only set it is a member of is - // the special All set. - if v == Unspecified { - return s == All - } - - return s.setI.Has(v) -} - -// Requests returns true if the given version is specifically requested by -// the receiving set. -// -// Requesting is a stronger form of set membership that represents an explicit -// request for a particular version, as opposed to the version just happening -// to match some criteria. -// -// The functions Only and Selection mark their arguments as requested in -// their returned sets. Exact version constraints given in constraint strings -// also mark their versions as requested. -// -// The concept of requesting is intended to help deal with pre-release versions -// in a safe and convenient way. When given generic version constraints like -// ">= 1.0.0" the user generally does not intend to match a pre-release version -// like "2.0.0-beta1", but it is important to stil be able to use that -// version if explicitly requested using the constraint string "2.0.0-beta1". -func (s Set) Requests(v Version) bool { - return s.AllRequested().Has(v) -} - -// AllRequested returns a subset of the receiver containing only the requested -// versions, as defined in the documentation for the method Requests. -// -// This can be used in conjunction with the predefined set "Released" to -// include pre-release versions only by explicit request, which is supported -// via the helper method WithoutUnrequestedPrereleases. -// -// The result of AllRequested is always a finite set. -func (s Set) AllRequested() Set { - return s.setI.AllRequested() -} - -// WithoutUnrequestedPrereleases returns a new set that includes all released -// versions from the receiving set, plus any explicitly-requested pre-releases, -// but does not include any unrequested pre-releases. -// -// "Requested" here is as defined in the documentation for the "Requests" method. 
-// -// This method is equivalent to the following set operations: -// -// versions.Union(s.AllRequested(), s.Intersection(versions.Released)) -func (s Set) WithoutUnrequestedPrereleases() Set { - return Union(s.AllRequested(), Released.Intersection(s)) -} - -// UnmarshalText is an implementation of encoding.TextUnmarshaler, allowing -// sets to be automatically unmarshalled from strings in text-based -// serialization formats, including encoding/json. -// -// The format expected is what is accepted by MeetingConstraintsString. Any -// parser errors are passed on verbatim to the caller. -func (s *Set) UnmarshalText(text []byte) error { - str := string(text) - new, err := MeetingConstraintsString(str) - if err != nil { - return err - } - *s = new - return nil -} - -var InitialDevelopment Set = OlderThan(MustParseVersion("1.0.0")) diff --git a/vendor/github.com/apparentlymart/go-versions/versions/set_bound.go b/vendor/github.com/apparentlymart/go-versions/versions/set_bound.go deleted file mode 100644 index 2e2ba09c..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/set_bound.go +++ /dev/null @@ -1,98 +0,0 @@ -package versions - -import ( - "fmt" -) - -type setBound struct { - v Version - op setBoundOp -} - -func (s setBound) Has(v Version) bool { - switch s.op { - case setBoundGT: - return v.GreaterThan(s.v) - case setBoundGTE: - return v.GreaterThan(s.v) || v.Same(s.v) - case setBoundLT: - return v.LessThan(s.v) - case setBoundLTE: - return v.LessThan(s.v) || v.Same(s.v) - default: - // Should never happen because the above is exhaustive - panic("invalid setBound operator") - } -} - -func (s setBound) AllRequested() Set { - // Inequalities request nothing. - return None -} - -func (s setBound) GoString() string { - switch s.op { - case setBoundGT: - return fmt.Sprintf("versions.NewerThan(%#v)", s.v) - case setBoundGTE: - return fmt.Sprintf("versions.AtLeast(%#v)", s.v) - case setBoundLT: - return fmt.Sprintf("versions.OlderThan(%#v)", s.v) - case setBoundLTE: - return fmt.Sprintf("versions.AtMost(%#v)", s.v) - default: - // Should never happen because the above is exhaustive - return fmt.Sprintf("versions.Set{versions.setBound{v:%#v,op:%#v}}", s.v, s.op) - } -} - -// NewerThan returns a set containing all versions greater than the given -// version, non-inclusive. -func NewerThan(v Version) Set { - return Set{ - setI: setBound{ - v: v, - op: setBoundGT, - }, - } -} - -// OlderThan returns a set containing all versions lower than the given -// version, non-inclusive. -func OlderThan(v Version) Set { - return Set{ - setI: setBound{ - v: v, - op: setBoundLT, - }, - } -} - -// AtLeast returns a set containing all versions greater than or equal to -// the given version. -func AtLeast(v Version) Set { - return Set{ - setI: setBound{ - v: v, - op: setBoundGTE, - }, - } -} - -// AtMost returns a set containing all versions less than or equal to the given -// version, non-inclusive. 
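
A short sketch of how these bound sets combine with the pre-release handling defined in set.go (illustrative only):

package main

import (
	"fmt"

	"github.com/apparentlymart/go-versions/versions"
)

func main() {
	v1 := versions.MustParseVersion("1.0.0")

	// AtLeast and AtMost both include their boundary version.
	fmt.Println(versions.AtLeast(v1).Has(v1), versions.AtMost(v1).Has(v1)) // true true

	// A bare inequality admits pre-releases purely by precedence...
	beta := versions.MustParseVersion("2.0.0-beta1")
	s := versions.AtLeast(v1)
	fmt.Println(s.Has(beta)) // true

	// ...but requests nothing, so unrequested pre-releases can be stripped.
	fmt.Println(s.WithoutUnrequestedPrereleases().Has(beta)) // false
}
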
-func AtMost(v Version) Set { - return Set{ - setI: setBound{ - v: v, - op: setBoundLTE, - }, - } -} - -type setBoundOp rune - -const setBoundGT = '>' -const setBoundGTE = '≥' -const setBoundLT = '<' -const setBoundLTE = '≤' diff --git a/vendor/github.com/apparentlymart/go-versions/versions/set_exact.go b/vendor/github.com/apparentlymart/go-versions/versions/set_exact.go deleted file mode 100644 index 4b2fa7bc..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/set_exact.go +++ /dev/null @@ -1,103 +0,0 @@ -package versions - -import ( - "bytes" - "fmt" -) - -type setExact map[Version]struct{} - -func (s setExact) Has(v Version) bool { - _, has := s[v] - return has -} - -func (s setExact) AllRequested() Set { - // We just return the receiver verbatim here, because everything in it - // is explicitly requested. - return Set{setI: s} -} - -func (s setExact) GoString() string { - if len(s) == 0 { - // Degenerate case; caller should use None instead - return "versions.Set{setExact{}}" - } - - if len(s) == 1 { - var first Version - for v := range s { - first = v - break - } - return fmt.Sprintf("versions.Only(%#v)", first) - } - - var buf bytes.Buffer - fmt.Fprint(&buf, "versions.Selection(") - versions := s.listVersions() - versions.Sort() - for i, version := range versions { - if i == 0 { - fmt.Fprint(&buf, version.GoString()) - } else { - fmt.Fprintf(&buf, ", %#v", version) - } - } - fmt.Fprint(&buf, ")") - return buf.String() -} - -// Only returns a version set containing only the given version. -// -// This function is guaranteed to produce a finite set. -func Only(v Version) Set { - return Set{ - setI: setExact{v: struct{}{}}, - } -} - -// Selection returns a version set containing only the versions given -// as arguments. -// -// This function is guaranteed to produce a finite set. -func Selection(vs ...Version) Set { - if len(vs) == 0 { - return None - } - ret := make(setExact) - for _, v := range vs { - ret[v] = struct{}{} - } - return Set{setI: ret} -} - -// Exactly returns true if and only if the receiving set is finite and -// contains only a single version that is the same as the version given. -func (s Set) Exactly(v Version) bool { - if !s.IsFinite() { - return false - } - l := s.List() - if len(l) != 1 { - return false - } - return v.Same(l[0]) -} - -var _ setFinite = setExact(nil) - -func (s setExact) isFinite() bool { - return true -} - -func (s setExact) listVersions() List { - if len(s) == 0 { - return nil - } - ret := make(List, 0, len(s)) - for v := range s { - ret = append(ret, v) - } - return ret -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/set_extremes.go b/vendor/github.com/apparentlymart/go-versions/versions/set_extremes.go deleted file mode 100644 index cae13f99..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/set_extremes.go +++ /dev/null @@ -1,49 +0,0 @@ -package versions - -// All is an infinite set containing all possible versions. -var All Set - -// None is a finite set containing no versions. -var None Set - -type setExtreme bool - -func (s setExtreme) Has(v Version) bool { - return bool(s) -} - -func (s setExtreme) AllRequested() Set { - // The extreme sets request nothing. 
- return None -} - -func (s setExtreme) GoString() string { - switch bool(s) { - case true: - return "versions.All" - case false: - return "versions.None" - default: - panic("strange new boolean value") - } -} - -var _ setFinite = setExtreme(false) - -func (s setExtreme) isFinite() bool { - // Only None is finite - return !bool(s) -} - -func (s setExtreme) listVersions() List { - return nil -} - -func init() { - All = Set{ - setI: setExtreme(true), - } - None = Set{ - setI: setExtreme(false), - } -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/set_finite.go b/vendor/github.com/apparentlymart/go-versions/versions/set_finite.go deleted file mode 100644 index eb1a5dc2..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/set_finite.go +++ /dev/null @@ -1,34 +0,0 @@ -package versions - -// setFinite is the interface implemented by set implementations that -// represent a finite number of versions, and can thus list those versions. -type setFinite interface { - isFinite() bool - listVersions() List -} - -// IsFinite returns true if the set represents a finite number of versions, -// and can thus support List without panicking. -func (s Set) IsFinite() bool { - return isFinite(s.setI) -} - -// List returns the specific versions represented by a finite list, in an -// undefined order. If desired, the caller can sort the resulting list -// using its Sort method. -// -// If the set is not finite, this method will panic. Use IsFinite to check -// unless a finite set was guaranteed by whatever operation(s) constructed -// the set. -func (s Set) List() List { - finite, ok := s.setI.(setFinite) - if !ok || !finite.isFinite() { - panic("List called on infinite set") - } - return finite.listVersions() -} - -func isFinite(s setI) bool { - finite, ok := s.(setFinite) - return ok && finite.isFinite() -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/set_intersection.go b/vendor/github.com/apparentlymart/go-versions/versions/set_intersection.go deleted file mode 100644 index 4afd1b42..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/set_intersection.go +++ /dev/null @@ -1,132 +0,0 @@ -package versions - -import ( - "bytes" - "fmt" -) - -type setIntersection []setI - -func (s setIntersection) Has(v Version) bool { - if len(s) == 0 { - // Weird to have an intersection with no elements, but we'll - // allow it and return something sensible. - return false - } - for _, ss := range s { - if !ss.Has(v) { - return false - } - } - return true -} - -func (s setIntersection) AllRequested() Set { - // The requested set for an intersection is the union of all of its - // members requested sets intersection the receiver. Therefore we'll - // borrow the same logic from setUnion's implementation here but - // then wrap it up in a setIntersection before we return. - - asUnion := setUnion(s) - ar := asUnion.AllRequested() - si := make(setIntersection, len(s)+1) - si[0] = ar.setI - copy(si[1:], s) - return Set{setI: si} -} - -func (s setIntersection) GoString() string { - var buf bytes.Buffer - fmt.Fprint(&buf, "versions.Intersection(") - for i, ss := range s { - if i == 0 { - fmt.Fprint(&buf, ss.GoString()) - } else { - fmt.Fprintf(&buf, ", %#v", ss) - } - } - fmt.Fprint(&buf, ")") - return buf.String() -} - -// Intersection creates a new set that contains the versions that all of the -// given sets have in common. -// -// The result is finite if any of the given sets are finite. 
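
Concretely (an illustrative sketch):

package main

import (
	"fmt"

	"github.com/apparentlymart/go-versions/versions"
)

func main() {
	candidates := versions.Selection(
		versions.MustParseVersion("1.0.0"),
		versions.MustParseVersion("1.5.0"),
		versions.MustParseVersion("2.0.0"),
	)
	atLeast := versions.AtLeast(versions.MustParseVersion("1.5.0"))

	got := versions.Intersection(candidates, atLeast)
	fmt.Println(got.IsFinite())                              // true: one member is finite
	fmt.Println(got.Has(versions.MustParseVersion("1.0.0"))) // false: below the bound
	fmt.Println(got.Has(versions.MustParseVersion("2.0.0"))) // true
}
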
-func Intersection(sets ...Set) Set { - if len(sets) == 0 { - return None - } - - r := make(setIntersection, 0, len(sets)) - for _, set := range sets { - if set == All { - continue - } - if set == None { - return None - } - if su, ok := set.setI.(setIntersection); ok { - r = append(r, su...) - } else { - r = append(r, set.setI) - } - } - if len(r) == 1 { - return Set{setI: r[0]} - } - return Set{setI: r} -} - -// Intersection returns a new set that contains all of the versions that -// the receiver and the given sets have in common. -// -// The result is a finite set if the receiver or any of the given sets are -// finite. -func (s Set) Intersection(others ...Set) Set { - r := make(setIntersection, 1, len(others)+1) - r[0] = s.setI - for _, ss := range others { - if ss == All { - continue - } - if ss == None { - return None - } - if su, ok := ss.setI.(setIntersection); ok { - r = append(r, su...) - } else { - r = append(r, ss.setI) - } - } - if len(r) == 1 { - return Set{setI: r[0]} - } - return Set{setI: r} -} - -var _ setFinite = setIntersection{} - -func (s setIntersection) isFinite() bool { - // intersection is finite if any of its members are, or if it is empty - if len(s) == 0 { - return true - } - for _, ss := range s { - if isFinite(ss) { - return true - } - } - return false -} - -func (s setIntersection) listVersions() List { - var ret List - for _, ss := range s { - if isFinite(ss) { - ret = append(ret, ss.(setFinite).listVersions()...) - } - } - ret.Filter(Set{setI: s}) - return ret -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/set_released.go b/vendor/github.com/apparentlymart/go-versions/versions/set_released.go deleted file mode 100644 index dea24036..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/set_released.go +++ /dev/null @@ -1,30 +0,0 @@ -package versions - -type setReleased struct{} - -func (s setReleased) Has(v Version) bool { - return v.Prerelease == "" -} - -func (s setReleased) AllRequested() Set { - // The set of all released versions requests nothing. - return None -} - -func (s setReleased) GoString() string { - return "versions.Released" -} - -// Released is a set containing all versions that have an empty prerelease -// string. -var Released Set - -// Prerelease is a set containing all versions that have a prerelease marker. -// This is the complement of Released, or in other words it is -// All.Subtract(Released). -var Prerelease Set - -func init() { - Released = Set{setI: setReleased{}} - Prerelease = All.Subtract(Released) -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/set_subtract.go b/vendor/github.com/apparentlymart/go-versions/versions/set_subtract.go deleted file mode 100644 index 19a9c01e..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/set_subtract.go +++ /dev/null @@ -1,56 +0,0 @@ -package versions - -import "fmt" - -type setSubtract struct { - from setI - sub setI -} - -func (s setSubtract) Has(v Version) bool { - return s.from.Has(v) && !s.sub.Has(v) -} - -func (s setSubtract) AllRequested() Set { - // Our set requests anything that is requested by "from", unless it'd - // be excluded by "sub". Notice that the whole of "sub" is used, rather - // than just the requested parts, because requesting is a positive - // action only. 
- return Set{setI: s.from}.AllRequested().Subtract(Set{setI: s.sub}) -} - -func (s setSubtract) GoString() string { - return fmt.Sprintf("(%#v).Subtract(%#v)", s.from, s.sub) -} - -// Subtract returns a new set that has all of the versions from the receiver -// except for any versions in the other given set. -// -// If the receiver is finite then the returned set is also finite. -func (s Set) Subtract(other Set) Set { - if other == None || s == None { - return s - } - if other == All { - return None - } - return Set{ - setI: setSubtract{ - from: s.setI, - sub: other.setI, - }, - } -} - -var _ setFinite = setSubtract{} - -func (s setSubtract) isFinite() bool { - // subtract is finite if its "from" is finite - return isFinite(s.from) -} - -func (s setSubtract) listVersions() List { - ret := s.from.(setFinite).listVersions() - ret = ret.Filter(Set{setI: s.sub}) - return ret -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/set_union.go b/vendor/github.com/apparentlymart/go-versions/versions/set_union.go deleted file mode 100644 index 1482f690..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/set_union.go +++ /dev/null @@ -1,121 +0,0 @@ -package versions - -import ( - "bytes" - "fmt" -) - -type setUnion []setI - -func (s setUnion) Has(v Version) bool { - for _, ss := range s { - if ss.Has(v) { - return true - } - } - return false -} - -func (s setUnion) AllRequested() Set { - // Since a union includes everything from its members, it includes all - // of the requested versions from its members too. - if len(s) == 0 { - return None - } - si := make(setUnion, 0, len(s)) - for _, ss := range s { - ar := ss.AllRequested() - if ar == None { - continue - } - si = append(si, ar.setI) - } - if len(si) == 1 { - return Set{setI: si[0]} - } - return Set{setI: si} -} - -func (s setUnion) GoString() string { - var buf bytes.Buffer - fmt.Fprint(&buf, "versions.Union(") - for i, ss := range s { - if i == 0 { - fmt.Fprint(&buf, ss.GoString()) - } else { - fmt.Fprintf(&buf, ", %#v", ss) - } - } - fmt.Fprint(&buf, ")") - return buf.String() -} - -// Union creates a new set that contains all of the given versions. -// -// The result is finite only if the receiver and all of the other given sets -// are finite. -func Union(sets ...Set) Set { - if len(sets) == 0 { - return None - } - - r := make(setUnion, 0, len(sets)) - for _, set := range sets { - if set == None { - continue - } - if su, ok := set.setI.(setUnion); ok { - r = append(r, su...) - } else { - r = append(r, set.setI) - } - } - if len(r) == 1 { - return Set{setI: r[0]} - } - return Set{setI: r} -} - -// Union returns a new set that contains all of the versions from the -// receiver and all of the versions from each of the other given sets. -// -// The result is finite only if the receiver and all of the other given sets -// are finite. -func (s Set) Union(others ...Set) Set { - r := make(setUnion, 1, len(others)+1) - r[0] = s.setI - for _, ss := range others { - if ss == None { - continue - } - if su, ok := ss.setI.(setUnion); ok { - r = append(r, su...) 
- } else { - r = append(r, ss.setI) - } - } - if len(r) == 1 { - return Set{setI: r[0]} - } - return Set{setI: r} -} - -var _ setFinite = setUnion{} - -func (s setUnion) isFinite() bool { - // union is finite only if all of its members are finite - for _, ss := range s { - if !isFinite(ss) { - return false - } - } - return true -} - -func (s setUnion) listVersions() List { - var ret List - for _, ss := range s { - ret = append(ret, ss.(setFinite).listVersions()...) - } - return ret -} diff --git a/vendor/github.com/apparentlymart/go-versions/versions/version.go b/vendor/github.com/apparentlymart/go-versions/versions/version.go deleted file mode 100644 index 8cd0eb5a..00000000 --- a/vendor/github.com/apparentlymart/go-versions/versions/version.go +++ /dev/null @@ -1,222 +0,0 @@ -package versions - -import ( - "fmt" - "strings" -) - -// Version represents a single version. -type Version struct { - Major uint64 - Minor uint64 - Patch uint64 - Prerelease VersionExtra - Metadata VersionExtra -} - -// Unspecified is the zero value of Version and represents the absense of a -// version number. -// -// Note that this is indistinguishable from the explicit version that -// results from parsing the string "0.0.0". -var Unspecified Version - -// Same returns true if the receiver has the same precedence as the other -// given version. In other words, it has the same major, minor and patch -// version number and an identical prerelease portion. The Metadata, if -// any, is not considered. -func (v Version) Same(other Version) bool { - return (v.Major == other.Major && - v.Minor == other.Minor && - v.Patch == other.Patch && - v.Prerelease == other.Prerelease) -} - -// Comparable returns a version that is the same as the receiver but its -// metadata is the empty string. For Comparable versions, the standard -// equality operator == is equivalent to method Same. -func (v Version) Comparable() Version { - v.Metadata = "" - return v -} - -// String is an implementation of fmt.Stringer that returns the receiver -// in the canonical "semver" format. -func (v Version) String() string { - s := fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) - if v.Prerelease != "" { - s = fmt.Sprintf("%s-%s", s, v.Prerelease) - } - if v.Metadata != "" { - s = fmt.Sprintf("%s+%s", s, v.Metadata) - } - return s -} - -func (v Version) GoString() string { - return fmt.Sprintf("versions.MustParseVersion(%q)", v.String()) -} - -// LessThan returns true if the receiver has a lower precedence than the -// other given version, as defined by the semantic versioning specification. -func (v Version) LessThan(other Version) bool { - switch { - case v.Major != other.Major: - return v.Major < other.Major - case v.Minor != other.Minor: - return v.Minor < other.Minor - case v.Patch != other.Patch: - return v.Patch < other.Patch - case v.Prerelease != other.Prerelease: - if v.Prerelease == "" { - return false - } - if other.Prerelease == "" { - return true - } - return v.Prerelease.LessThan(other.Prerelease) - default: - return false - } -} - -// GreaterThan returns true if the receiver has a higher precedence than the -// other given version, as defined by the semantic versioning specification. 
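
The precedence rules are easiest to see by example (illustrative only):

package main

import (
	"fmt"

	"github.com/apparentlymart/go-versions/versions"
)

func main() {
	release := versions.MustParseVersion("1.0.0")
	alpha := versions.MustParseVersion("1.0.0-alpha")

	// A pre-release precedes its corresponding release.
	fmt.Println(alpha.LessThan(release)) // true

	// Build metadata is ignored by Same but not by ==.
	tagged := versions.MustParseVersion("1.0.0+linux.amd64")
	fmt.Println(tagged.Same(release), tagged == release) // true false
}
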
-func (v Version) GreaterThan(other Version) bool {
-	switch {
-	case v.Major != other.Major:
-		return v.Major > other.Major
-	case v.Minor != other.Minor:
-		return v.Minor > other.Minor
-	case v.Patch != other.Patch:
-		return v.Patch > other.Patch
-	case v.Prerelease != other.Prerelease:
-		if v.Prerelease == "" {
-			return true
-		}
-		if other.Prerelease == "" {
-			return false
-		}
-		return !v.Prerelease.LessThan(other.Prerelease)
-	default:
-		return false
-	}
-}
-
-// MarshalText is an implementation of encoding.TextMarshaler, allowing versions
-// to be automatically marshalled for text-based serialization formats,
-// including encoding/json.
-//
-// The format used is that returned by String, which can be parsed using
-// ParseVersion.
-func (v Version) MarshalText() (text []byte, err error) {
-	return []byte(v.String()), nil
-}
-
-// UnmarshalText is an implementation of encoding.TextUnmarshaler, allowing
-// versions to be automatically unmarshalled from strings in text-based
-// serialization formats, including encoding/json.
-//
-// The format expected is what is accepted by ParseVersion. Any parser errors
-// are passed on verbatim to the caller.
-func (v *Version) UnmarshalText(text []byte) error {
-	str := string(text)
-	new, err := ParseVersion(str)
-	if err != nil {
-		return err
-	}
-	*v = new
-	return nil
-}
-
-// VersionExtra represents a string containing dot-delimited tokens, as used
-// in the pre-release and build metadata portions of a Semantic Versioning
-// version expression.
-type VersionExtra string
-
-// Parts tokenizes the string into its separate parts by splitting on dots.
-//
-// The result is undefined if the receiver is not valid per the semver spec.
-func (e VersionExtra) Parts() []string {
-	return strings.Split(string(e), ".")
-}
-
-func (e VersionExtra) Raw() string {
-	return string(e)
-}
-
-// LessThan returns true if the receiver has lower precedence than the
-// other given VersionExtra string, per the rules defined in the semver
-// spec for pre-release versions.
-//
-// Build metadata has no defined precedence rules, so it is not meaningful
-// to call this method on a VersionExtra representing build metadata.
-func (e VersionExtra) LessThan(other VersionExtra) bool {
-	if e == other {
-		// Easy path
-		return false
-	}
-
-	s1 := string(e)
-	s2 := string(other)
-	for {
-		d1 := strings.IndexByte(s1, '.')
-		d2 := strings.IndexByte(s2, '.')
-
-		switch {
-		case d1 == -1 && d2 != -1:
-			// s1 is on its final part while s2 has more; if the current
-			// parts differ then that decides, otherwise s1 precedes s2
-			// because it has fewer parts.
-			if s2s := s2[:d2]; s1 != s2s {
-				return lessThanStr(s1, s2s)
-			}
-			return true
-		case d2 == -1 && d1 != -1:
-			// s2 is on its final part while s1 has more; if the current
-			// parts differ then that decides, otherwise s1 succeeds s2
-			// because it has more parts.
-			if s1s := s1[:d1]; s1s != s2 {
-				return lessThanStr(s1s, s2)
-			}
-			return false
-		case d1 == -1: // d2 must be -1 too, because of the above
-			// this is our last portion to compare
-			return lessThanStr(s1, s2)
-		default:
-			s1s := s1[:d1]
-			s2s := s2[:d2]
-			if s1s != s2s {
-				return lessThanStr(s1s, s2s)
-			}
-			s1 = s1[d1+1:]
-			s2 = s2[d2+1:]
-		}
-	}
-}
-
-func lessThanStr(s1, s2 string) bool {
-	// How we compare here depends on whether the string consists entirely of digits
-	s1Numeric := true
-	s2Numeric := true
-	for _, c := range s1 {
-		if c < '0' || c > '9' {
-			s1Numeric = false
-			break
-		}
-	}
-	for _, c := range s2 {
-		if c < '0' || c > '9' {
-			s2Numeric = false
-			break
-		}
-	}
-
-	switch {
-	case s1Numeric && !s2Numeric:
-		return true
-	case s2Numeric && !s1Numeric:
-		return false
-	case s1Numeric: // s2Numeric must also be true
-		switch {
-		case len(s1) < len(s2):
-			return true
-		case len(s2) < len(s1):
-			return false
-		default:
-			return s1 < s2
-		}
-	default:
-		return s1 < s2
-	}
-}
diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml
deleted file mode 100644
index 102fb9a6..00000000
--- a/vendor/github.com/blang/semver/.travis.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-language: go
-matrix:
-  include:
-  - go: 1.4.3
-  - go: 1.5.4
-  - go: 1.6.3
-  - go: 1.7
-  - go: tip
-  allow_failures:
-  - go: tip
-install:
-- go get golang.org/x/tools/cmd/cover
-- go get github.com/mattn/goveralls
-script:
-- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci
-  -repotoken $COVERALLS_TOKEN
-- echo "Build examples" ; cd examples && go build
-- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .)
-env:
-  global:
-    secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw=
diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE
deleted file mode 100644
index 5ba5c86f..00000000
--- a/vendor/github.com/blang/semver/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License
-
-Copyright (c) 2014 Benedikt Lang
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md
deleted file mode 100644
index 08b2e4a3..00000000
--- a/vendor/github.com/blang/semver/README.md
+++ /dev/null
@@ -1,194 +0,0 @@
-semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
-======
-
-semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
-
-Usage
------
-```bash
-$ go get github.com/blang/semver
-```
-Note: Always vendor your dependencies or pin to a specific version tag.
-
-```go
-import "github.com/blang/semver"
-v1, err := semver.Make("1.0.0-beta")
-v2, err := semver.Make("2.0.0-beta")
-v1.Compare(v2)
-```
-
-Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
-
-Why should I use this lib?
------
-
-- Fully spec compatible
-- No reflection
-- No regex
-- Fully tested (Coverage >99%)
-- Readable parsing/validation errors
-- Fast (See [Benchmarks](#benchmarks))
-- Only Stdlib
-- Uses values instead of pointers
-- Many features, see below
-
-
-Features
------
-
-- Parsing and validation at all levels
-- Comparator-like comparisons
-- Compare Helper Methods
-- InPlace manipulation
-- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
-- Wildcards `>=1.x`, `<=2.5.x`
-- Sortable (implements sort.Interface)
-- database/sql compatible (sql.Scanner/Valuer)
-- encoding/json compatible (json.Marshaler/Unmarshaler)
-
-Ranges
-------
-
-A `Range` is a set of conditions which specify which versions satisfy the range.
-
-A condition is composed of an operator and a version. The supported operators are:
-
-- `<1.0.0` Less than `1.0.0`
-- `<=1.0.0` Less than or equal to `1.0.0`
-- `>1.0.0` Greater than `1.0.0`
-- `>=1.0.0` Greater than or equal to `1.0.0`
-- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
-- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
-
-Note that spaces between the operator and the version will be gracefully tolerated.
-
-A `Range` can consist of multiple ranges separated by space:
-
-Ranges can be linked by logical AND:
-
- - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
- - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`
-
-Ranges can also be linked by logical OR:
-
- - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`
-
-AND has a higher precedence than OR. It's not possible to use brackets.
-
-Ranges can be combined by both AND and OR
-
- - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
-
-Range usage:
-
-```
-v, err := semver.Parse("1.2.3")
-expectedRange, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
-if expectedRange(v) {
-	//valid
-}
-
-```
-
-Example
------
-
-Have a look at full examples in [examples/main.go](examples/main.go)
-
-```go
-import "github.com/blang/semver"
-
-v, err := semver.Make("0.0.1-alpha.preview+123.github")
-fmt.Printf("Major: %d\n", v.Major)
-fmt.Printf("Minor: %d\n", v.Minor)
-fmt.Printf("Patch: %d\n", v.Patch)
-fmt.Printf("Pre: %s\n", v.Pre)
-fmt.Printf("Build: %s\n", v.Build)
-
-// Prerelease versions array
-if len(v.Pre) > 0 {
-	fmt.Println("Prerelease versions:")
-	for i, pre := range v.Pre {
-		fmt.Printf("%d: %q\n", i, pre)
-	}
-}
-
-// Build meta data array
-if len(v.Build) > 0 {
-	fmt.Println("Build meta data:")
-	for i, build := range v.Build {
-		fmt.Printf("%d: %q\n", i, build)
-	}
-}
-
-v001, err := semver.Make("0.0.1")
-// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
-v001.GT(v) == true
-v.LT(v001) == true
-v.GTE(v) == true
-v.LTE(v) == true
-
-// Or use v.Compare(v2) for comparisons (-1, 0, 1):
-v001.Compare(v) == 1
-v.Compare(v001) == -1
-v.Compare(v) == 0
-
-// Manipulate Version in place:
-v.Pre[0], err = semver.NewPRVersion("beta")
-if err != nil {
-	fmt.Printf("Error parsing pre release version: %q", err)
-}
-
-fmt.Println("\nValidate versions:")
-v.Build[0] = "?"
-
-err = v.Validate()
-if err != nil {
-	fmt.Printf("Validation failed: %s\n", err)
-}
-```
-
-
-Benchmarks
------
-
-    BenchmarkParseSimple-4         5000000     390 ns/op    48 B/op   1 allocs/op
-    BenchmarkParseComplex-4        1000000    1813 ns/op   256 B/op   7 allocs/op
-    BenchmarkParseAverage-4        1000000    1171 ns/op   163 B/op   4 allocs/op
-    BenchmarkStringSimple-4       20000000     119 ns/op    16 B/op   1 allocs/op
-    BenchmarkStringLarger-4       10000000     206 ns/op    32 B/op   2 allocs/op
-    BenchmarkStringComplex-4       5000000     324 ns/op    80 B/op   3 allocs/op
-    BenchmarkStringAverage-4       5000000     273 ns/op    53 B/op   2 allocs/op
-    BenchmarkValidateSimple-4    200000000    9.33 ns/op     0 B/op   0 allocs/op
-    BenchmarkValidateComplex-4     3000000     469 ns/op     0 B/op   0 allocs/op
-    BenchmarkValidateAverage-4     5000000     256 ns/op     0 B/op   0 allocs/op
-    BenchmarkCompareSimple-4     100000000    11.8 ns/op     0 B/op   0 allocs/op
-    BenchmarkCompareComplex-4     50000000    30.8 ns/op     0 B/op   0 allocs/op
-    BenchmarkCompareAverage-4     30000000    41.5 ns/op     0 B/op   0 allocs/op
-    BenchmarkSort-4                3000000     419 ns/op   256 B/op   2 allocs/op
-    BenchmarkRangeParseSimple-4    2000000     850 ns/op   192 B/op   5 allocs/op
-    BenchmarkRangeParseAverage-4   1000000    1677 ns/op   400 B/op  10 allocs/op
-    BenchmarkRangeParseComplex-4    300000    5214 ns/op  1440 B/op  30 allocs/op
-    BenchmarkRangeMatchSimple-4   50000000    25.6 ns/op     0 B/op   0 allocs/op
-    BenchmarkRangeMatchAverage-4  30000000    56.4 ns/op     0 B/op   0 allocs/op
-    BenchmarkRangeMatchComplex-4  10000000     153 ns/op     0 B/op   0 allocs/op
-
-See benchmark cases at [semver_test.go](semver_test.go)
-
-
-Motivation
------
-
-I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which I don't like.
-
-
-Contribution
------
-
-Feel free to make a pull request. For bigger changes, create an issue first to discuss it.
-
-
-License
------
-
-See [LICENSE](LICENSE) file.
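The README above shows the API only in fragments, so here is a minimal, self-contained sketch of the same parse/compare/range flow, hedged to what the README itself documents; the version strings and the range expression are arbitrary examples, not upstream test data:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	v := semver.MustParse("1.2.3")

	// ParseRange returns a Range, which is just a func(Version) bool.
	// As the README notes, AND binds tighter than OR.
	rng, err := semver.ParseRange(">=1.0.0 <2.0.0 || >=3.0.0")
	if err != nil {
		panic(err)
	}

	fmt.Println(rng(v))                         // true: 1.2.3 falls in [1.0.0, 2.0.0)
	fmt.Println(rng(semver.MustParse("2.5.0"))) // false: rejected by both alternatives
}
```

Because `Range` is a plain function type, the `AND`/`OR` combinators defined in `range.go` below compose without any extra data structures.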
diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go
deleted file mode 100644
index a74bf7c4..00000000
--- a/vendor/github.com/blang/semver/json.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package semver
-
-import (
-	"encoding/json"
-)
-
-// MarshalJSON implements the encoding/json.Marshaler interface.
-func (v Version) MarshalJSON() ([]byte, error) {
-	return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
-func (v *Version) UnmarshalJSON(data []byte) (err error) {
-	var versionString string
-
-	if err = json.Unmarshal(data, &versionString); err != nil {
-		return
-	}
-
-	*v, err = Parse(versionString)
-
-	return
-}
diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json
deleted file mode 100644
index 1cf8ebdd..00000000
--- a/vendor/github.com/blang/semver/package.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-  "author": "blang",
-  "bugs": {
-    "URL": "https://github.com/blang/semver/issues",
-    "url": "https://github.com/blang/semver/issues"
-  },
-  "gx": {
-    "dvcsimport": "github.com/blang/semver"
-  },
-  "gxVersion": "0.10.0",
-  "language": "go",
-  "license": "MIT",
-  "name": "semver",
-  "releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
-  "version": "3.5.1"
-}
-
diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go
deleted file mode 100644
index fca406d4..00000000
--- a/vendor/github.com/blang/semver/range.go
+++ /dev/null
@@ -1,416 +0,0 @@
-package semver
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-	"unicode"
-)
-
-type wildcardType int
-
-const (
-	noneWildcard  wildcardType = iota
-	majorWildcard wildcardType = 1
-	minorWildcard wildcardType = 2
-	patchWildcard wildcardType = 3
-)
-
-func wildcardTypefromInt(i int) wildcardType {
-	switch i {
-	case 1:
-		return majorWildcard
-	case 2:
-		return minorWildcard
-	case 3:
-		return patchWildcard
-	default:
-		return noneWildcard
-	}
-}
-
-type comparator func(Version, Version) bool
-
-var (
-	compEQ comparator = func(v1 Version, v2 Version) bool {
-		return v1.Compare(v2) == 0
-	}
-	compNE = func(v1 Version, v2 Version) bool {
-		return v1.Compare(v2) != 0
-	}
-	compGT = func(v1 Version, v2 Version) bool {
-		return v1.Compare(v2) == 1
-	}
-	compGE = func(v1 Version, v2 Version) bool {
-		return v1.Compare(v2) >= 0
-	}
-	compLT = func(v1 Version, v2 Version) bool {
-		return v1.Compare(v2) == -1
-	}
-	compLE = func(v1 Version, v2 Version) bool {
-		return v1.Compare(v2) <= 0
-	}
-)
-
-type versionRange struct {
-	v Version
-	c comparator
-}
-
-// rangeFunc creates a Range from the given versionRange.
-func (vr *versionRange) rangeFunc() Range {
-	return Range(func(v Version) bool {
-		return vr.c(v, vr.v)
-	})
-}
-
-// Range represents a range of versions.
-// A Range can be used to check if a Version satisfies it:
-//
-//     rng, err := semver.ParseRange(">1.0.0 <2.0.0")
-//     rng(semver.MustParse("1.1.1")) // returns true
-type Range func(Version) bool
-
-// OR combines the existing Range with another Range using logical OR.
-func (rf Range) OR(f Range) Range {
-	return Range(func(v Version) bool {
-		return rf(v) || f(v)
-	})
-}
-
-// AND combines the existing Range with another Range using logical AND.
-func (rf Range) AND(f Range) Range {
-	return Range(func(v Version) bool {
-		return rf(v) && f(v)
-	})
-}
-
-// ParseRange parses a range and returns a Range.
-// If the range could not be parsed an error is returned.
-//
-// Valid ranges are:
-// - "<1.0.0"
-// - "<=1.0.0"
-// - ">1.0.0"
-// - ">=1.0.0"
-// - "1.0.0", "=1.0.0", "==1.0.0"
-// - "!1.0.0", "!=1.0.0"
-//
-// A Range can consist of multiple ranges separated by space:
-// Ranges can be linked by logical AND:
-// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
-// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
-//
-// Ranges can also be linked by logical OR:
-// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
-//
-// AND has a higher precedence than OR. It's not possible to use brackets.
-//
-// Ranges can be combined by both AND and OR
-//
-//  - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
-func ParseRange(s string) (Range, error) {
-	parts := splitAndTrim(s)
-	orParts, err := splitORParts(parts)
-	if err != nil {
-		return nil, err
-	}
-	expandedParts, err := expandWildcardVersion(orParts)
-	if err != nil {
-		return nil, err
-	}
-	var orFn Range
-	for _, p := range expandedParts {
-		var andFn Range
-		for _, ap := range p {
-			opStr, vStr, err := splitComparatorVersion(ap)
-			if err != nil {
-				return nil, err
-			}
-			vr, err := buildVersionRange(opStr, vStr)
-			if err != nil {
-				return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
-			}
-			rf := vr.rangeFunc()
-
-			// Set function
-			if andFn == nil {
-				andFn = rf
-			} else { // Combine with existing function
-				andFn = andFn.AND(rf)
-			}
-		}
-		if orFn == nil {
-			orFn = andFn
-		} else {
-			orFn = orFn.OR(andFn)
-		}
-
-	}
-	return orFn, nil
-}
-
-// splitORParts splits the already cleaned parts by '||'.
-// Checks for invalid positions of the operator and returns an
-// error if found.
-func splitORParts(parts []string) ([][]string, error) {
-	var ORparts [][]string
-	last := 0
-	for i, p := range parts {
-		if p == "||" {
-			if i == 0 {
-				return nil, fmt.Errorf("First element in range is '||'")
-			}
-			ORparts = append(ORparts, parts[last:i])
-			last = i + 1
-		}
-	}
-	if last == len(parts) {
-		return nil, fmt.Errorf("Last element in range is '||'")
-	}
-	ORparts = append(ORparts, parts[last:])
-	return ORparts, nil
-}
-
-// buildVersionRange takes an operator and a version string
-// and builds a versionRange, otherwise an error.
-func buildVersionRange(opStr, vStr string) (*versionRange, error) {
-	c := parseComparator(opStr)
-	if c == nil {
-		return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
-	}
-	v, err := Parse(vStr)
-	if err != nil {
-		return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
-	}
-
-	return &versionRange{
-		v: v,
-		c: c,
-	}, nil
-
-}
-
-// inArray checks if a byte is contained in an array of bytes
-func inArray(s byte, list []byte) bool {
-	for _, el := range list {
-		if el == s {
-			return true
-		}
-	}
-	return false
-}
-
-// splitAndTrim splits a range string by spaces and cleans whitespaces
-func splitAndTrim(s string) (result []string) {
-	last := 0
-	var lastChar byte
-	excludeFromSplit := []byte{'>', '<', '='}
-	for i := 0; i < len(s); i++ {
-		if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
-			if last < i-1 {
-				result = append(result, s[last:i])
-			}
-			last = i + 1
-		} else if s[i] != ' ' {
-			lastChar = s[i]
-		}
-	}
-	if last < len(s)-1 {
-		result = append(result, s[last:])
-	}
-
-	for i, v := range result {
-		result[i] = strings.Replace(v, " ", "", -1)
-	}
-
-	// parts := strings.Split(s, " ")
-	// for _, x := range parts {
-	//	if s := strings.TrimSpace(x); len(s) != 0 {
-	//		result = append(result, s)
-	//	}
-	// }
-	return
-}
-
-// splitComparatorVersion splits the comparator from the version.
-// Input must be free of leading or trailing spaces.
-func splitComparatorVersion(s string) (string, string, error) {
-	i := strings.IndexFunc(s, unicode.IsDigit)
-	if i == -1 {
-		return "", "", fmt.Errorf("Could not get version from string: %q", s)
-	}
-	return strings.TrimSpace(s[0:i]), s[i:], nil
-}
-
-// getWildcardType will return the type of wildcard that the
-// passed version contains
-func getWildcardType(vStr string) wildcardType {
-	parts := strings.Split(vStr, ".")
-	nparts := len(parts)
-	wildcard := parts[nparts-1]
-
-	possibleWildcardType := wildcardTypefromInt(nparts)
-	if wildcard == "x" {
-		return possibleWildcardType
-	}
-
-	return noneWildcard
-}
-
-// createVersionFromWildcard will convert a wildcard version
-// into a regular version, replacing 'x's with '0's, handling
-// special cases like '1.x.x' and '1.x'
-func createVersionFromWildcard(vStr string) string {
-	// handle 1.x.x
-	vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
-	vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
-	parts := strings.Split(vStr2, ".")
-
-	// handle 1.x
-	if len(parts) == 2 {
-		return vStr2 + ".0"
-	}
-
-	return vStr2
-}
-
-// incrementMajorVersion will increment the major version
-// of the passed version
-func incrementMajorVersion(vStr string) (string, error) {
-	parts := strings.Split(vStr, ".")
-	i, err := strconv.Atoi(parts[0])
-	if err != nil {
-		return "", err
-	}
-	parts[0] = strconv.Itoa(i + 1)
-
-	return strings.Join(parts, "."), nil
-}
-
-// incrementMinorVersion will increment the minor version
-// of the passed version
-func incrementMinorVersion(vStr string) (string, error) {
-	parts := strings.Split(vStr, ".")
-	i, err := strconv.Atoi(parts[1])
-	if err != nil {
-		return "", err
-	}
-	parts[1] = strconv.Itoa(i + 1)
-
-	return strings.Join(parts, "."), nil
-}
-
-// expandWildcardVersion will expand wildcards inside versions
-// following these rules:
-//
-// * when dealing with patch wildcards:
-// >= 1.2.x will become >= 1.2.0
-// <= 1.2.x will become < 1.3.0
-// > 1.2.x will become >= 1.3.0
-// < 1.2.x will become < 1.2.0
-// != 1.2.x will become < 1.2.0 >= 1.3.0
-//
-// * when dealing with minor wildcards:
-// >= 1.x will become >= 1.0.0
-// <= 1.x will become < 2.0.0
-// > 1.x will become >= 2.0.0
-// < 1.x will become < 1.0.0
-// != 1.x will become < 1.0.0 >= 2.0.0
-//
-// * when dealing with wildcards without
-// version operator:
-// 1.2.x will become >= 1.2.0 < 1.3.0
-// 1.x will become >= 1.0.0 < 2.0.0
-func expandWildcardVersion(parts [][]string) ([][]string, error) {
-	var expandedParts [][]string
-	for _, p := range parts {
-		var newParts []string
-		for _, ap := range p {
-			if strings.Index(ap, "x") != -1 {
-				opStr, vStr, err := splitComparatorVersion(ap)
-				if err != nil {
-					return nil, err
-				}
-
-				versionWildcardType := getWildcardType(vStr)
-				flatVersion := createVersionFromWildcard(vStr)
-
-				var resultOperator string
-				var shouldIncrementVersion bool
-				switch opStr {
-				case ">":
-					resultOperator = ">="
-					shouldIncrementVersion = true
-				case ">=":
-					resultOperator = ">="
-				case "<":
-					resultOperator = "<"
-				case "<=":
-					resultOperator = "<"
-					shouldIncrementVersion = true
-				case "", "=", "==":
-					newParts = append(newParts, ">="+flatVersion)
-					resultOperator = "<"
-					shouldIncrementVersion = true
-				case "!=", "!":
-					newParts = append(newParts, "<"+flatVersion)
-					resultOperator = ">="
-					shouldIncrementVersion = true
-				}
-
-				var resultVersion string
-				if shouldIncrementVersion {
-					switch versionWildcardType {
-					case patchWildcard:
-						resultVersion, _ = incrementMinorVersion(flatVersion)
-					case minorWildcard:
-						resultVersion, _ = incrementMajorVersion(flatVersion)
-					}
-				} else {
-					resultVersion = flatVersion
-				}
-
-				ap = resultOperator + resultVersion
-			}
-			newParts = append(newParts, ap)
-		}
-		expandedParts = append(expandedParts, newParts)
-	}
-
-	return expandedParts, nil
-}
-
-func parseComparator(s string) comparator {
-	switch s {
-	case "==":
-		fallthrough
-	case "":
-		fallthrough
-	case "=":
-		return compEQ
-	case ">":
-		return compGT
-	case ">=":
-		return compGE
-	case "<":
-		return compLT
-	case "<=":
-		return compLE
-	case "!":
-		fallthrough
-	case "!=":
-		return compNE
-	}
-
-	return nil
-}
-
-// MustParseRange is like ParseRange but panics if the range cannot be parsed.
-func MustParseRange(s string) Range {
-	r, err := ParseRange(s)
-	if err != nil {
-		panic(`semver: ParseRange(` + s + `): ` + err.Error())
-	}
-	return r
-}
diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go
deleted file mode 100644
index 8ee0842e..00000000
--- a/vendor/github.com/blang/semver/semver.go
+++ /dev/null
@@ -1,418 +0,0 @@
-package semver
-
-import (
-	"errors"
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-const (
-	numbers  string = "0123456789"
-	alphas          = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
-	alphanum        = alphas + numbers
-)
-
-// SpecVersion is the latest fully supported spec version of semver
-var SpecVersion = Version{
-	Major: 2,
-	Minor: 0,
-	Patch: 0,
-}
-
-// Version represents a semver compatible version
-type Version struct {
-	Major uint64
-	Minor uint64
-	Patch uint64
-	Pre   []PRVersion
-	Build []string // no precedence
-}
-
-// Version to string
-func (v Version) String() string {
-	b := make([]byte, 0, 5)
-	b = strconv.AppendUint(b, v.Major, 10)
-	b = append(b, '.')
-	b = strconv.AppendUint(b, v.Minor, 10)
-	b = append(b, '.')
-	b = strconv.AppendUint(b, v.Patch, 10)
-
-	if len(v.Pre) > 0 {
-		b = append(b, '-')
-		b = append(b, v.Pre[0].String()...)
-
-		for _, pre := range v.Pre[1:] {
-			b = append(b, '.')
-			b = append(b, pre.String()...)
-		}
-	}
-
-	if len(v.Build) > 0 {
-		b = append(b, '+')
-		b = append(b, v.Build[0]...)
-
-		for _, build := range v.Build[1:] {
-			b = append(b, '.')
-			b = append(b, build...)
-		}
-	}
-
-	return string(b)
-}
-
-// Equals checks if v is equal to o.
-func (v Version) Equals(o Version) bool {
-	return (v.Compare(o) == 0)
-}
-
-// EQ checks if v is equal to o.
-func (v Version) EQ(o Version) bool {
-	return (v.Compare(o) == 0)
-}
-
-// NE checks if v is not equal to o.
-func (v Version) NE(o Version) bool {
-	return (v.Compare(o) != 0)
-}
-
-// GT checks if v is greater than o.
-func (v Version) GT(o Version) bool {
-	return (v.Compare(o) == 1)
-}
-
-// GTE checks if v is greater than or equal to o.
-func (v Version) GTE(o Version) bool {
-	return (v.Compare(o) >= 0)
-}
-
-// GE checks if v is greater than or equal to o.
-func (v Version) GE(o Version) bool {
-	return (v.Compare(o) >= 0)
-}
-
-// LT checks if v is less than o.
-func (v Version) LT(o Version) bool {
-	return (v.Compare(o) == -1)
-}
-
-// LTE checks if v is less than or equal to o.
-func (v Version) LTE(o Version) bool {
-	return (v.Compare(o) <= 0)
-}
-
-// LE checks if v is less than or equal to o.
-func (v Version) LE(o Version) bool {
-	return (v.Compare(o) <= 0)
-}
-
-// Compare compares Versions v to o:
-// -1 == v is less than o
-// 0 == v is equal to o
-// 1 == v is greater than o
-func (v Version) Compare(o Version) int {
-	if v.Major != o.Major {
-		if v.Major > o.Major {
-			return 1
-		}
-		return -1
-	}
-	if v.Minor != o.Minor {
-		if v.Minor > o.Minor {
-			return 1
-		}
-		return -1
-	}
-	if v.Patch != o.Patch {
-		if v.Patch > o.Patch {
-			return 1
-		}
-		return -1
-	}
-
-	// Quick comparison if a version has no prerelease versions
-	if len(v.Pre) == 0 && len(o.Pre) == 0 {
-		return 0
-	} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
-		return 1
-	} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
-		return -1
-	}
-
-	i := 0
-	for ; i < len(v.Pre) && i < len(o.Pre); i++ {
-		if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
-			continue
-		} else if comp == 1 {
-			return 1
-		} else {
-			return -1
-		}
-	}
-
-	// If all compared prerelease identifiers are equal, the version with
-	// additional identifiers has the higher precedence
-	if i == len(v.Pre) && i == len(o.Pre) {
-		return 0
-	} else if i == len(v.Pre) && i < len(o.Pre) {
-		return -1
-	} else {
-		return 1
-	}
-
-}
-
-// Validate validates v and returns an error in case of an invalid version
-func (v Version) Validate() error {
-	// Major, Minor, Patch already validated using uint64
-
-	for _, pre := range v.Pre {
-		if !pre.IsNum { // Numeric prerelease versions already uint64
-			if len(pre.VersionStr) == 0 {
-				return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
-			}
-			if !containsOnly(pre.VersionStr, alphanum) {
-				return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
-			}
-		}
-	}
-
-	for _, build := range v.Build {
-		if len(build) == 0 {
-			return fmt.Errorf("Build meta data can not be empty %q", build)
-		}
-		if !containsOnly(build, alphanum) {
-			return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
-		}
-	}
-
-	return nil
-}
-
-// New is an alias for Parse that returns a pointer: it parses the version
-// string and returns a pointer to a validated Version, or an error
-func New(s string) (vp *Version, err error) {
-	v, err := Parse(s)
-	vp = &v
-	return
-}
-
-// Make is an alias for Parse, parses version string and returns a validated Version or error
-func Make(s string) (Version, error) {
-	return Parse(s)
-}
-
-// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
-// specs to be parsed by this library. It does so by normalizing versions before passing them to
-// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
-// with only major and minor components specified.
-func ParseTolerant(s string) (Version, error) {
-	s = strings.TrimSpace(s)
-	s = strings.TrimPrefix(s, "v")
-
-	// Split into major.minor.(patch+pr+meta)
-	parts := strings.SplitN(s, ".", 3)
-	if len(parts) < 3 {
-		if strings.ContainsAny(parts[len(parts)-1], "+-") {
-			return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
-		}
-		for len(parts) < 3 {
-			parts = append(parts, "0")
-		}
-		s = strings.Join(parts, ".")
-	}
-
-	return Parse(s)
-}
-
-// Parse parses version string and returns a validated Version or error
-func Parse(s string) (Version, error) {
-	if len(s) == 0 {
-		return Version{}, errors.New("Version string empty")
-	}
-
-	// Split into major.minor.(patch+pr+meta)
-	parts := strings.SplitN(s, ".", 3)
-	if len(parts) != 3 {
-		return Version{}, errors.New("No Major.Minor.Patch elements found")
-	}
-
-	// Major
-	if !containsOnly(parts[0], numbers) {
-		return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
-	}
-	if hasLeadingZeroes(parts[0]) {
-		return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
-	}
-	major, err := strconv.ParseUint(parts[0], 10, 64)
-	if err != nil {
-		return Version{}, err
-	}
-
-	// Minor
-	if !containsOnly(parts[1], numbers) {
-		return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
-	}
-	if hasLeadingZeroes(parts[1]) {
-		return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
-	}
-	minor, err := strconv.ParseUint(parts[1], 10, 64)
-	if err != nil {
-		return Version{}, err
-	}
-
-	v := Version{}
-	v.Major = major
-	v.Minor = minor
-
-	var build, prerelease []string
-	patchStr := parts[2]
-
-	if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
-		build = strings.Split(patchStr[buildIndex+1:], ".")
-		patchStr = patchStr[:buildIndex]
-	}
-
-	if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
-		prerelease = strings.Split(patchStr[preIndex+1:], ".")
-		patchStr = patchStr[:preIndex]
-	}
-
-	if !containsOnly(patchStr, numbers) {
-		return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
-	}
-	if hasLeadingZeroes(patchStr) {
-		return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
-	}
-	patch, err := strconv.ParseUint(patchStr, 10, 64)
-	if err != nil {
-		return Version{}, err
-	}
-
-	v.Patch = patch
-
-	// Prerelease
-	for _, prstr := range prerelease {
-		parsedPR, err := NewPRVersion(prstr)
-		if err != nil {
-			return Version{}, err
-		}
-		v.Pre = append(v.Pre, parsedPR)
-	}
-
-	// Build meta data
-	for _, str := range build {
-		if len(str) == 0 {
-			return Version{}, errors.New("Build meta data is empty")
-		}
-		if !containsOnly(str, alphanum) {
-			return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
-		}
-		v.Build = append(v.Build, str)
-	}
-
-	return v, nil
-}
-
-// MustParse is like Parse but panics if the version cannot be parsed.
-func MustParse(s string) Version {
-	v, err := Parse(s)
-	if err != nil {
-		panic(`semver: Parse(` + s + `): ` + err.Error())
-	}
-	return v
-}
-
-// PRVersion represents a PreRelease Version
-type PRVersion struct {
-	VersionStr string
-	VersionNum uint64
-	IsNum      bool
-}
-
-// NewPRVersion creates a new valid prerelease version
-func NewPRVersion(s string) (PRVersion, error) {
-	if len(s) == 0 {
-		return PRVersion{}, errors.New("Prerelease is empty")
-	}
-	v := PRVersion{}
-	if containsOnly(s, numbers) {
-		if hasLeadingZeroes(s) {
-			return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
-		}
-		num, err := strconv.ParseUint(s, 10, 64)
-
-		// Might never be hit, but just in case
-		if err != nil {
-			return PRVersion{}, err
-		}
-		v.VersionNum = num
-		v.IsNum = true
-	} else if containsOnly(s, alphanum) {
-		v.VersionStr = s
-		v.IsNum = false
-	} else {
-		return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
-	}
-	return v, nil
-}
-
-// IsNumeric checks if prerelease-version is numeric
-func (v PRVersion) IsNumeric() bool {
-	return v.IsNum
-}
-
-// Compare compares two PreRelease Versions v and o:
-// -1 == v is less than o
-// 0 == v is equal to o
-// 1 == v is greater than o
-func (v PRVersion) Compare(o PRVersion) int {
-	if v.IsNum && !o.IsNum {
-		return -1
-	} else if !v.IsNum && o.IsNum {
-		return 1
-	} else if v.IsNum && o.IsNum {
-		if v.VersionNum == o.VersionNum {
-			return 0
-		} else if v.VersionNum > o.VersionNum {
-			return 1
-		} else {
-			return -1
-		}
-	} else { // both are Alphas
-		if v.VersionStr == o.VersionStr {
-			return 0
-		} else if v.VersionStr > o.VersionStr {
-			return 1
-		} else {
-			return -1
-		}
-	}
-}
-
-// PreRelease version to string
-func (v PRVersion) String() string {
-	if v.IsNum {
-		return strconv.FormatUint(v.VersionNum, 10)
-	}
-	return v.VersionStr
-}
-
-func containsOnly(s string, set string) bool {
-	return strings.IndexFunc(s, func(r rune) bool {
-		return !strings.ContainsRune(set, r)
-	}) == -1
-}
-
-func hasLeadingZeroes(s string) bool {
-	return len(s) > 1 && s[0] == '0'
-}
-
-// NewBuildVersion creates a new valid build version
-func NewBuildVersion(s string) (string, error) {
-	if len(s) == 0 {
-		return "", errors.New("Buildversion is empty")
-	}
-	if !containsOnly(s, alphanum) {
-		return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
-	}
-	return s, nil
-}
diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go
deleted file mode 100644
index e18f8808..00000000
--- a/vendor/github.com/blang/semver/sort.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package semver
-
-import (
-	"sort"
-)
-
-// Versions represents multiple versions.
-type Versions []Version
-
-// Len returns length of version collection
-func (s Versions) Len() int {
-	return len(s)
-}
-
-// Swap swaps two versions inside the collection by its indices
-func (s Versions) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-
-// Less checks if version at index i is less than version at index j
-func (s Versions) Less(i, j int) bool {
-	return s[i].LT(s[j])
-}
-
-// Sort sorts a slice of versions
-func Sort(versions []Version) {
-	sort.Sort(Versions(versions))
-}
diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go
deleted file mode 100644
index eb4d8026..00000000
--- a/vendor/github.com/blang/semver/sql.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package semver
-
-import (
-	"database/sql/driver"
-	"fmt"
-)
-
-// Scan implements the database/sql.Scanner interface.
-func (v *Version) Scan(src interface{}) (err error) {
-	var str string
-	switch src := src.(type) {
-	case string:
-		str = src
-	case []byte:
-		str = string(src)
-	default:
-		return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
-	}
-
-	// Avoid shadowing the named return value so parse errors propagate.
-	t, err := Parse(str)
-	if err == nil {
-		*v = t
-	}
-
-	return
-}
-
-// Value implements the database/sql/driver.Valuer interface.
-func (v Version) Value() (driver.Value, error) {
-	return v.String(), nil
-}
diff --git a/vendor/github.com/bmatcuk/doublestar/.gitignore b/vendor/github.com/bmatcuk/doublestar/.gitignore
deleted file mode 100644
index af212ecc..00000000
--- a/vendor/github.com/bmatcuk/doublestar/.gitignore
+++ /dev/null
@@ -1,32 +0,0 @@
-# vi
-*~
-*.swp
-*.swo
-
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-
-# test directory
-test/
diff --git a/vendor/github.com/bmatcuk/doublestar/.travis.yml b/vendor/github.com/bmatcuk/doublestar/.travis.yml
deleted file mode 100644
index ec4fee88..00000000
--- a/vendor/github.com/bmatcuk/doublestar/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-language: go
-
-go:
-  - 1.11
-  - 1.12
-
-before_install:
-  - go get -t -v ./...
-
-script:
-  - go test -race -coverprofile=coverage.txt -covermode=atomic
-
-after_success:
-  - bash <(curl -s https://codecov.io/bash)
-
diff --git a/vendor/github.com/bmatcuk/doublestar/LICENSE b/vendor/github.com/bmatcuk/doublestar/LICENSE
deleted file mode 100644
index 309c9d1d..00000000
--- a/vendor/github.com/bmatcuk/doublestar/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Bob Matcuk
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/github.com/bmatcuk/doublestar/README.md b/vendor/github.com/bmatcuk/doublestar/README.md
deleted file mode 100644
index 8e365c5e..00000000
--- a/vendor/github.com/bmatcuk/doublestar/README.md
+++ /dev/null
@@ -1,109 +0,0 @@
-![Release](https://img.shields.io/github/release/bmatcuk/doublestar.svg?branch=master)
-[![Build Status](https://travis-ci.org/bmatcuk/doublestar.svg?branch=master)](https://travis-ci.org/bmatcuk/doublestar)
-[![codecov.io](https://img.shields.io/codecov/c/github/bmatcuk/doublestar.svg?branch=master)](https://codecov.io/github/bmatcuk/doublestar?branch=master)
-
-# doublestar
-
-**doublestar** is a [golang](http://golang.org/) implementation of path pattern
-matching and globbing with support for "doublestar" (aka globstar: `**`)
-patterns.
-
-doublestar patterns match files and directories recursively. For example, if
-you had the following directory structure:
-
-```
-grandparent
-`-- parent
-    |-- child1
-    `-- child2
-```
-
-You could find the children with patterns such as: `**/child*`,
-`grandparent/**/child?`, `**/parent/*`, or even just `**` by itself (which will
-return all files and directories recursively).
-
-Bash's globstar is doublestar's inspiration and, as such, works similarly.
-Note that the doublestar must appear as a path component by itself. A pattern
-such as `/path**` is invalid and will be treated the same as `/path*`, but
-`/path*/**` should achieve the desired result. Additionally, `/path/**` will
-match all directories and files under the path directory, but `/path/**/` will
-only match directories.
-
-## Installation
-
-**doublestar** can be installed via `go get`:
-
-```bash
-go get github.com/bmatcuk/doublestar
-```
-
-To use it in your code, you must import it:
-
-```go
-import "github.com/bmatcuk/doublestar"
-```
-
-## Functions
-
-### Match
-```go
-func Match(pattern, name string) (bool, error)
-```
-
-Match returns true if `name` matches the file name `pattern`
-([see below](#patterns)). `name` and `pattern` are split on forward slash (`/`)
-characters and may be relative or absolute.
-
-Note: `Match()` is meant to be a drop-in replacement for `path.Match()`. As
-such, it always uses `/` as the path separator. If you are writing code that
-will run on systems where `/` is not the path separator (such as Windows), you
-want to use `PathMatch()` (below) instead.
-
-
-### PathMatch
-```go
-func PathMatch(pattern, name string) (bool, error)
-```
-
-PathMatch returns true if `name` matches the file name `pattern`
-([see below](#patterns)). The difference between Match and PathMatch is that
-PathMatch will automatically use your system's path separator to split `name`
-and `pattern`.
-
-`PathMatch()` is meant to be a drop-in replacement for `filepath.Match()`.
-
-### Glob
-```go
-func Glob(pattern string) ([]string, error)
-```
-
-Glob finds all files and directories in the filesystem that match `pattern`
-([see below](#patterns)). `pattern` may be relative (to the current working
-directory), or absolute.
-
-`Glob()` is meant to be a drop-in replacement for `filepath.Glob()`.
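Since `Match` and `Glob` are documented above as drop-in replacements for `path.Match()` and `filepath.Glob()`, the following short sketch shows how they compose; the pattern and file names are illustrative only, reusing the directory layout from the example earlier in this README:

```go
package main

import (
	"fmt"

	"github.com/bmatcuk/doublestar"
)

func main() {
	// Match works purely on strings, always using '/' as the separator.
	ok, err := doublestar.Match("grandparent/**/child?", "grandparent/parent/child1")
	if err != nil {
		panic(err)
	}
	fmt.Println(ok) // true: '**' spans the intermediate "parent" directory

	// Glob consults the real filesystem, relative to the working directory.
	matches, err := doublestar.Glob("**/*.go")
	if err != nil {
		panic(err)
	}
	for _, m := range matches {
		fmt.Println(m)
	}
}
```

`PathMatch` behaves like `Match` but splits on the system separator, which is also why escaping is disabled where the separator is `\\`.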
-
-## Patterns
-
-**doublestar** supports the following special terms in the patterns:
-
-Special Terms | Meaning
-------------- | -------
-`*`           | matches any sequence of non-path-separators
-`**`          | matches any sequence of characters, including path separators
-`?`           | matches any single non-path-separator character
-`[class]`     | matches any single non-path-separator character against a class of characters ([see below](#character-classes))
-`{alt1,...}`  | matches a sequence of characters if one of the comma-separated alternatives matches
-
-Any character with a special meaning can be escaped with a backslash (`\`).
-
-### Character Classes
-
-Character classes support the following:
-
-Class      | Meaning
----------- | -------
-`[abc]`    | matches any single character within the set
-`[a-z]`    | matches any single character in the range
-`[^class]` | matches any single character which does *not* match the class
-
diff --git a/vendor/github.com/bmatcuk/doublestar/doublestar.go b/vendor/github.com/bmatcuk/doublestar/doublestar.go
deleted file mode 100644
index 0044dfa8..00000000
--- a/vendor/github.com/bmatcuk/doublestar/doublestar.go
+++ /dev/null
@@ -1,476 +0,0 @@
-package doublestar
-
-import (
-	"fmt"
-	"os"
-	"path"
-	"path/filepath"
-	"strings"
-	"unicode/utf8"
-)
-
-// ErrBadPattern indicates a pattern was malformed.
-var ErrBadPattern = path.ErrBadPattern
-
-// Split a path on the given separator, respecting escaping.
-func splitPathOnSeparator(path string, separator rune) (ret []string) {
-	idx := 0
-	if separator == '\\' {
-		// if the separator is '\\', then we can just split...
-		ret = strings.Split(path, string(separator))
-		idx = len(ret)
-	} else {
-		// otherwise, we need to be careful of situations where the separator was escaped
-		cnt := strings.Count(path, string(separator))
-		if cnt == 0 {
-			return []string{path}
-		}
-
-		ret = make([]string, cnt+1)
-		pathlen := len(path)
-		separatorLen := utf8.RuneLen(separator)
-		emptyEnd := false
-		for start := 0; start < pathlen; {
-			end := indexRuneWithEscaping(path[start:], separator)
-			if end == -1 {
-				emptyEnd = false
-				end = pathlen
-			} else {
-				emptyEnd = true
-				end += start
-			}
-			ret[idx] = path[start:end]
-			start = end + separatorLen
-			idx++
-		}
-
-		// If the last rune is a path separator, we need to append an empty string to
-		// represent the last, empty path component. By default, the strings from
-		// make([]string, ...) will be empty, so we just need to increment the count
-		if emptyEnd {
-			idx++
-		}
-	}
-
-	return ret[:idx]
-}
-
-// Find the first index of a rune in a string,
-// ignoring any times the rune is escaped using "\".
-func indexRuneWithEscaping(s string, r rune) int {
-	end := strings.IndexRune(s, r)
-	if end == -1 {
-		return -1
-	}
-	if end > 0 && s[end-1] == '\\' {
-		start := end + utf8.RuneLen(r)
-		end = indexRuneWithEscaping(s[start:], r)
-		if end != -1 {
-			end += start
-		}
-	}
-	return end
-}
-
-// Match returns true if name matches the shell file name pattern.
-// The pattern syntax is:
-//
-//  pattern:
-//    { term }
-//  term:
-//    '*'         matches any sequence of non-path-separators
-//    '**'        matches any sequence of characters, including
-//                path separators.
-//    '?'         matches any single non-path-separator character
-//    '[' [ '^' ] { character-range } ']'
-//                character class (must be non-empty)
-//    '{' { term } [ ',' { term } ... ] '}'
-//    c           matches character c (c != '*', '?', '\\', '[')
-//    '\\' c      matches character c
-//
-//  character-range:
-//    c           matches character c (c != '\\', '-', ']')
-//    '\\' c      matches character c
-//    lo '-' hi   matches character c for lo <= c <= hi
-//
-// Match requires pattern to match all of name, not just a substring.
-// The path-separator defaults to the '/' character. The only possible
-// returned error is ErrBadPattern, when pattern is malformed.
-//
-// Note: this is meant as a drop-in replacement for path.Match() which
-// always uses '/' as the path separator. If you want to support systems
-// which use a different path separator (such as Windows), what you want
-// is the PathMatch() function below.
-//
-func Match(pattern, name string) (bool, error) {
-	return matchWithSeparator(pattern, name, '/')
-}
-
-// PathMatch is like Match except that it uses your system's path separator.
-// For most systems, this will be '/'. However, for Windows, it would be '\\'.
-// Note that for systems where the path separator is '\\', escaping is
-// disabled.
-//
-// Note: this is meant as a drop-in replacement for filepath.Match().
-//
-func PathMatch(pattern, name string) (bool, error) {
-	return matchWithSeparator(pattern, name, os.PathSeparator)
-}
-
-// matchWithSeparator returns true if name matches the shell file name pattern
-// using the given separator. The pattern syntax is:
-//
-//  pattern:
-//    { term }
-//  term:
-//    '*'         matches any sequence of non-path-separators
-//    '**'        matches any sequence of characters, including
-//                path separators.
-//    '?'         matches any single non-path-separator character
-//    '[' [ '^' ] { character-range } ']'
-//                character class (must be non-empty)
-//    '{' { term } [ ',' { term } ... ] '}'
-//    c           matches character c (c != '*', '?', '\\', '[')
-//    '\\' c      matches character c
-//
-//  character-range:
-//    c           matches character c (c != '\\', '-', ']')
-//    '\\' c      matches character c, unless separator is '\\'
-//    lo '-' hi   matches character c for lo <= c <= hi
-//
-// Match requires pattern to match all of name, not just a substring.
-// The only possible returned error is ErrBadPattern, when pattern
-// is malformed.
-//
-func matchWithSeparator(pattern, name string, separator rune) (bool, error) {
-	patternComponents := splitPathOnSeparator(pattern, separator)
-	nameComponents := splitPathOnSeparator(name, separator)
-	return doMatching(patternComponents, nameComponents)
-}
-
-func doMatching(patternComponents, nameComponents []string) (matched bool, err error) {
-	// check for some base-cases
-	patternLen, nameLen := len(patternComponents), len(nameComponents)
-	if patternLen == 0 && nameLen == 0 {
-		return true, nil
-	}
-	if patternLen == 0 || nameLen == 0 {
-		return false, nil
-	}
-
-	patIdx, nameIdx := 0, 0
-	for patIdx < patternLen && nameIdx < nameLen {
-		if patternComponents[patIdx] == "**" {
-			// if our last pattern component is a doublestar, we're done -
-			// doublestar will match any remaining name components, if any.
-			if patIdx++; patIdx >= patternLen {
-				return true, nil
-			}
-
-			// otherwise, try matching remaining components
-			for ; nameIdx < nameLen; nameIdx++ {
-				if m, _ := doMatching(patternComponents[patIdx:], nameComponents[nameIdx:]); m {
-					return true, nil
-				}
-			}
-			return false, nil
-		}
-
-		// try matching components
-		matched, err = matchComponent(patternComponents[patIdx], nameComponents[nameIdx])
-		if !matched || err != nil {
-			return
-		}
-
-		patIdx++
-		nameIdx++
-	}
-	return patIdx >= patternLen && nameIdx >= nameLen, nil
-}
-
-// Glob returns the names of all files matching pattern or nil
-// if there is no matching file. The syntax of pattern is the same
-// as in Match. The pattern may describe hierarchical names such as
-// /usr/*/bin/ed (assuming the Separator is '/').
-//
-// Glob ignores file system errors such as I/O errors reading directories.
-// The only possible returned error is ErrBadPattern, when pattern
-// is malformed.
-//
-// Your system path separator is automatically used. This means on
-// systems where the separator is '\\' (Windows), escaping will be
-// disabled.
-//
-// Note: this is meant as a drop-in replacement for filepath.Glob().
-//
-func Glob(pattern string) (matches []string, err error) {
-	patternComponents := splitPathOnSeparator(filepath.ToSlash(pattern), '/')
-	if len(patternComponents) == 0 {
-		return nil, nil
-	}
-
-	// On Windows systems, this will return the drive name ('C:') for filesystem
-	// paths, or \\<host>\<share> for UNC paths. On other systems, it will
-	// return an empty string. Since absolute paths on non-Windows systems start
-	// with a slash, patternComponent[0] == volumeName will return true for both
-	// absolute Windows paths and absolute non-Windows paths, but we need a
-	// separate check for UNC paths.
-	volumeName := filepath.VolumeName(pattern)
-	isWindowsUNC := strings.HasPrefix(pattern, `\\`)
-	if isWindowsUNC || patternComponents[0] == volumeName {
-		startComponentIndex := 1
-		if isWindowsUNC {
-			startComponentIndex = 4
-		}
-		return doGlob(fmt.Sprintf("%s%s", volumeName, string(os.PathSeparator)), patternComponents[startComponentIndex:], matches)
-	}
-
-	// otherwise, it's a relative pattern
-	return doGlob(".", patternComponents, matches)
-}
-
-// Perform a glob
-func doGlob(basedir string, components, matches []string) (m []string, e error) {
-	m = matches
-	e = nil
-
-	// figure out how many components we don't need to glob because they're
-	// just names without patterns - we'll use os.Lstat below to check if that
-	// path actually exists
-	patLen := len(components)
-	patIdx := 0
-	for ; patIdx < patLen; patIdx++ {
-		if strings.IndexAny(components[patIdx], "*?[{\\") >= 0 {
-			break
-		}
-	}
-	if patIdx > 0 {
-		basedir = filepath.Join(basedir, filepath.Join(components[0:patIdx]...))
-	}
-
-	// Lstat will return an error if the file/directory doesn't exist
-	fi, err := os.Lstat(basedir)
-	if err != nil {
-		return
-	}
-
-	// if there are no more components, we've found a match
-	if patIdx >= patLen {
-		m = append(m, basedir)
-		return
-	}
-
-	// otherwise, we need to check each item in the directory...
-	// first, if basedir is a symlink, follow it...
-	if (fi.Mode() & os.ModeSymlink) != 0 {
-		fi, err = os.Stat(basedir)
-		if err != nil {
-			return
-		}
-	}
-
-	// confirm it's a directory...
-	if !fi.IsDir() {
-		return
-	}
-
-	// read directory
-	dir, err := os.Open(basedir)
-	if err != nil {
-		return
-	}
-	defer dir.Close()
-
-	files, _ := dir.Readdir(-1)
-	lastComponent := (patIdx + 1) >= patLen
-	if components[patIdx] == "**" {
-		// if the current component is a doublestar, we'll try depth-first
-		for _, file := range files {
-			// if symlink, we may want to follow
-			if (file.Mode() & os.ModeSymlink) != 0 {
-				file, err = os.Stat(filepath.Join(basedir, file.Name()))
-				if err != nil {
-					continue
-				}
-			}
-
-			if file.IsDir() {
-				// recurse into directories
-				if lastComponent {
-					m = append(m, filepath.Join(basedir, file.Name()))
-				}
-				m, e = doGlob(filepath.Join(basedir, file.Name()), components[patIdx:], m)
-			} else if lastComponent {
-				// if the pattern's last component is a doublestar, we match filenames, too
-				m = append(m, filepath.Join(basedir, file.Name()))
-			}
-		}
-		if lastComponent {
-			return // we're done
-		}
-		patIdx++
-		lastComponent = (patIdx + 1) >= patLen
-	}
-
-	// check items in current directory and recurse
-	var match bool
-	for _, file := range files {
-		match, e = matchComponent(components[patIdx], file.Name())
-		if e != nil {
-			return
-		}
-		if match {
-			if lastComponent {
-				m = append(m, filepath.Join(basedir, file.Name()))
-			} else {
-				m, e = doGlob(filepath.Join(basedir, file.Name()), components[patIdx+1:], m)
-			}
-		}
-	}
-	return
-}
-
-// Attempt to match a single pattern component with a path component
-func matchComponent(pattern, name string) (bool, error) {
-	// check some base cases
-	patternLen, nameLen := len(pattern), len(name)
-	if patternLen == 0 && nameLen == 0 {
-		return true, nil
-	}
-	if patternLen == 0 {
-		return false, nil
-	}
-	if nameLen == 0 && pattern != "*" {
-		return false, nil
-	}
-
-	// check for matches one rune at a time
-	patIdx, nameIdx := 0, 0
-	for patIdx < patternLen && nameIdx < nameLen {
-		patRune, patAdj := utf8.DecodeRuneInString(pattern[patIdx:])
-		nameRune, nameAdj := utf8.DecodeRuneInString(name[nameIdx:])
-		if patRune == '\\' {
-			// handle escaped runes
-			patIdx += patAdj
-			patRune, patAdj = utf8.DecodeRuneInString(pattern[patIdx:])
-			if patRune == utf8.RuneError {
-				return false, ErrBadPattern
-			} else if patRune == nameRune {
-				patIdx += patAdj
-				nameIdx += nameAdj
-			} else {
-				return false, nil
-			}
-		} else if patRune == '*' {
-			// handle stars
-			if patIdx += patAdj; patIdx >= patternLen {
-				// a star at the end of a pattern will always
-				// match the rest of the path
-				return true, nil
-			}
-
-			// check if we can make any matches
-			for ; nameIdx < nameLen; nameIdx += nameAdj {
-				if m, _ := matchComponent(pattern[patIdx:], name[nameIdx:]); m {
-					return true, nil
-				}
-			}
-			return false, nil
-		} else if patRune == '[' {
-			// handle character sets
-			patIdx += patAdj
-			endClass := indexRuneWithEscaping(pattern[patIdx:], ']')
-			if endClass == -1 {
-				return false, ErrBadPattern
-			}
-			endClass += patIdx
-			classRunes := []rune(pattern[patIdx:endClass])
-			classRunesLen := len(classRunes)
-			if classRunesLen > 0 {
-				classIdx := 0
-				matchClass := false
-				if classRunes[0] == '^' {
-					classIdx++
-				}
-				for classIdx < classRunesLen {
-					low := classRunes[classIdx]
-					if low == '-' {
-						return false, ErrBadPattern
-					}
-					classIdx++
-					if low == '\\' {
-						if classIdx < classRunesLen {
-							low = classRunes[classIdx]
-							classIdx++
-						} else {
-							return false, ErrBadPattern
-						}
-					}
-					high := low
-					if classIdx < classRunesLen && classRunes[classIdx] == '-' {
-						// we have a range of runes
-						if classIdx++; classIdx >= classRunesLen {
-							return false, ErrBadPattern
-						}
-						high = classRunes[classIdx]
-						if high == '-' {
-							return false, ErrBadPattern
-						}
-						classIdx++
-						if high == '\\' {
-							if classIdx < classRunesLen {
-								high = classRunes[classIdx]
-								classIdx++
-							} else {
-								return false, ErrBadPattern
-							}
-						}
-					}
-					if low <= nameRune && nameRune <= high {
-						matchClass = true
-					}
-				}
-				if matchClass == (classRunes[0] == '^') {
-					return false, nil
-				}
-			} else {
-				return false, ErrBadPattern
-			}
-			patIdx = endClass + 1
-			nameIdx += nameAdj
-		} else if patRune == '{' {
-			// handle alternatives such as {alt1,alt2,...}
-			patIdx += patAdj
-			endOptions := indexRuneWithEscaping(pattern[patIdx:], '}')
-			if endOptions == -1 {
-				return false, ErrBadPattern
-			}
-			endOptions += patIdx
-			options := splitPathOnSeparator(pattern[patIdx:endOptions], ',')
-			patIdx = endOptions + 1
-			for _, o := range options {
-				m, e := matchComponent(o+pattern[patIdx:], name[nameIdx:])
-				if e != nil {
-					return false, e
-				}
-				if m {
-					return true, nil
-				}
-			}
-			return false, nil
-		} else if patRune == '?' || patRune == nameRune {
-			// handle single-rune wildcard
-			patIdx += patAdj
-			nameIdx += nameAdj
-		} else {
-			return false, nil
-		}
-	}
-	if patIdx >= patternLen && nameIdx >= nameLen {
-		return true, nil
-	}
-	if nameIdx >= nameLen && pattern[patIdx:] == "*" || pattern[patIdx:] == "**" {
-		return true, nil
-	}
-	return false, nil
-}
diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml
deleted file mode 100644
index d8156a60..00000000
--- a/vendor/github.com/google/uuid/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
-  - 1.4.3
-  - 1.5.3
-  - tip
-
-script:
-  - go test -v ./...
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
deleted file mode 100644
index 04fdf09f..00000000
--- a/vendor/github.com/google/uuid/CONTRIBUTING.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# How to contribute
-
-We definitely welcome patches and contributions to this project!
-
-### Legal requirements
-
-In order to protect both you and ourselves, you will need to sign the
-[Contributor License Agreement](https://cla.developers.google.com/clas).
-
-You may have already signed it for other Google projects.
diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS
deleted file mode 100644
index b4bb97f6..00000000
--- a/vendor/github.com/google/uuid/CONTRIBUTORS
+++ /dev/null
@@ -1,9 +0,0 @@
-Paul Borman
-bmatsuo
-shawnps
-theory
-jboverfelt
-dsymonds
-cd1
-wallclockbuilder
-dansouza
diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE
deleted file mode 100644
index 5dc68268..00000000
--- a/vendor/github.com/google/uuid/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009,2014 Google Inc. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-    * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml
deleted file mode 100644
index d8156a60..00000000
--- a/vendor/github.com/google/uuid/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
- - 1.4.3
- - 1.5.3
- - tip
-
-script:
- - go test -v ./...
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
deleted file mode 100644
index 04fdf09f..00000000
--- a/vendor/github.com/google/uuid/CONTRIBUTING.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# How to contribute
-
-We definitely welcome patches and contributions to this project!
-
-### Legal requirements
-
-In order to protect both you and ourselves, you will need to sign the
-[Contributor License Agreement](https://cla.developers.google.com/clas).
-
-You may have already signed it for other Google projects.
diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS
deleted file mode 100644
index b4bb97f6..00000000
--- a/vendor/github.com/google/uuid/CONTRIBUTORS
+++ /dev/null
@@ -1,9 +0,0 @@
-Paul Borman
-bmatsuo
-shawnps
-theory
-jboverfelt
-dsymonds
-cd1
-wallclockbuilder
-dansouza
diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE
deleted file mode 100644
index 5dc68268..00000000
--- a/vendor/github.com/google/uuid/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009,2014 Google Inc. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
deleted file mode 100644
index f765a46f..00000000
--- a/vendor/github.com/google/uuid/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
-The uuid package generates and inspects UUIDs based on
-[RFC 4122](http://tools.ietf.org/html/rfc4122)
-and DCE 1.1: Authentication and Security Services.
-
-This package is based on the github.com/pborman/uuid package (previously named
-code.google.com/p/go-uuid). It differs from these earlier packages in that
-a UUID is a 16 byte array rather than a byte slice. One loss due to this
-change is the ability to represent an invalid UUID (vs a NIL UUID).
-
-###### Install
-`go get github.com/google/uuid`
-
-###### Documentation
-[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
-
-Full `go doc` style documentation for the package can be viewed online without
-installing this package by using the GoDoc site here:
-http://pkg.go.dev/github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go
deleted file mode 100644
index fa820b9d..00000000
--- a/vendor/github.com/google/uuid/dce.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "encoding/binary"
- "fmt"
- "os"
-)
-
-// A Domain represents a Version 2 domain
-type Domain byte
-
-// Domain constants for DCE Security (Version 2) UUIDs.
-const (
- Person = Domain(0)
- Group = Domain(1)
- Org = Domain(2)
-)
-
-// NewDCESecurity returns a DCE Security (Version 2) UUID.
-//
-// The domain should be one of Person, Group or Org.
-// On a POSIX system the id should be the user's UID for the Person
-// domain and the user's GID for the Group. The meaning of id for
-// the domain Org or on non-POSIX systems is site defined.
-//
-// For a given domain/id pair the same token may be returned for up to
-// 7 minutes and 10 seconds.
-func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
- uuid, err := NewUUID()
- if err == nil {
- uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
- uuid[9] = byte(domain)
- binary.BigEndian.PutUint32(uuid[0:], id)
- }
- return uuid, err
-}
-
-// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
-// domain with the id returned by os.Getuid.
-//
-// NewDCESecurity(Person, uint32(os.Getuid()))
-func NewDCEPerson() (UUID, error) {
- return NewDCESecurity(Person, uint32(os.Getuid()))
-}
-
-// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
-// domain with the id returned by os.Getgid.
-//
-// NewDCESecurity(Group, uint32(os.Getgid()))
-func NewDCEGroup() (UUID, error) {
- return NewDCESecurity(Group, uint32(os.Getgid()))
-}
-
-// Domain returns the domain for a Version 2 UUID. Domains are only defined
-// for Version 2 UUIDs.
-func (uuid UUID) Domain() Domain {
- return Domain(uuid[9])
-}
-
-// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
-// UUIDs.
-func (uuid UUID) ID() uint32 {
- return binary.BigEndian.Uint32(uuid[0:4])
-}
-
-func (d Domain) String() string {
- switch d {
- case Person:
- return "Person"
- case Group:
- return "Group"
- case Org:
- return "Org"
- }
- return fmt.Sprintf("Domain%d", int(d))
-}
diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go
deleted file mode 100644
index 5b8a4b9a..00000000
--- a/vendor/github.com/google/uuid/doc.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package uuid generates and inspects UUIDs.
-//
-// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
-// Services.
-//
-// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
-// maps or compared directly.
-package uuid
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
deleted file mode 100644
index b1746163..00000000
--- a/vendor/github.com/google/uuid/hash.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "crypto/md5"
- "crypto/sha1"
- "hash"
-)
-
-// Well known namespace IDs and UUIDs
-var (
- NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
- NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
- NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
- NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
- Nil UUID // empty UUID, all zeros
-)
-
-// NewHash returns a new UUID derived from the hash of space concatenated with
-// data generated by h. The hash should be at least 16 bytes in length. The
-// first 16 bytes of the hash are used to form the UUID. The version of the
-// UUID will be the lower 4 bits of version. NewHash is used to implement
-// NewMD5 and NewSHA1.
-func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
- h.Reset()
- h.Write(space[:])
- h.Write(data)
- s := h.Sum(nil)
- var uuid UUID
- copy(uuid[:], s)
- uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
- uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
- return uuid
-}
-
-// NewMD5 returns a new MD5 (Version 3) UUID based on the
-// supplied name space and data. It is the same as calling:
-//
-// NewHash(md5.New(), space, data, 3)
-func NewMD5(space UUID, data []byte) UUID {
- return NewHash(md5.New(), space, data, 3)
-}
-
-// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
-// supplied name space and data.
It is the same as calling: -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go deleted file mode 100644 index 14bd3407..00000000 --- a/vendor/github.com/google/uuid/marshal.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "fmt" - -// MarshalText implements encoding.TextMarshaler. -func (uuid UUID) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], uuid) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (uuid *UUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err != nil { - return err - } - *uuid = id - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (uuid UUID) MarshalBinary() ([]byte, error) { - return uuid[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (uuid *UUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(uuid[:], data) - return nil -} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go deleted file mode 100644 index d651a2b0..00000000 --- a/vendor/github.com/google/uuid/node.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "sync" -) - -var ( - nodeMu sync.Mutex - ifname string // name of interface being used - nodeID [6]byte // hardware for version 1 UUIDs - zeroID [6]byte // nodeID with only 0's -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - iname, addr := getHardwareInterface(name) // null implementation for js - if iname != "" && addr != nil { - ifname = iname - copy(nodeID[:], addr) - return true - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - ifname = "random" - randomBits(nodeID[:]) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nid := nodeID - return nid[:] -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. 
-func SetNodeID(id []byte) bool {
- if len(id) < 6 {
- return false
- }
- defer nodeMu.Unlock()
- nodeMu.Lock()
- copy(nodeID[:], id)
- ifname = "user"
- return true
-}
-
-// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
-// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
-func (uuid UUID) NodeID() []byte {
- var node [6]byte
- copy(node[:], uuid[10:])
- return node[:]
-}
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
deleted file mode 100644
index 24b78edc..00000000
--- a/vendor/github.com/google/uuid/node_js.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build js
-
-package uuid
-
-// getHardwareInterface returns nil values for the JS version of the code.
-// This removes the "net" dependency, because it is not used in the browser.
-// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
-func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go
deleted file mode 100644
index 0cbbcddb..00000000
--- a/vendor/github.com/google/uuid/node_net.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !js
-
-package uuid
-
-import "net"
-
-var interfaces []net.Interface // cached list of interfaces
-
-// getHardwareInterface returns the name and hardware address of interface name.
-// If name is "" then the name and hardware address of one of the system's
-// interfaces is returned. If no interfaces are found (name does not exist or
-// there are no interfaces) then "", nil is returned.
-//
-// Only addresses of at least 6 bytes are returned.
-func getHardwareInterface(name string) (string, []byte) {
- if interfaces == nil {
- var err error
- interfaces, err = net.Interfaces()
- if err != nil {
- return "", nil
- }
- }
- for _, ifs := range interfaces {
- if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
- return ifs.Name, ifs.HardwareAddr
- }
- }
- return "", nil
-}
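The sql.go file removed next wires UUID into database/sql. As a hedged, editor-added sketch of that round trip (no real database needed, since Scan accepts the string and []byte forms drivers produce):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Scan accepts the string and []byte representations a SQL driver
	// typically yields; Value returns the canonical string form.
	var id uuid.UUID
	if err := id.Scan("6ba7b810-9dad-11d1-80b4-00c04fd430c8"); err != nil {
		panic(err)
	}
	v, err := id.Value()
	fmt.Println(id, v, err)
}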
diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go
deleted file mode 100644
index f326b54d..00000000
--- a/vendor/github.com/google/uuid/sql.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "database/sql/driver"
- "fmt"
-)
-
-// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
-// Currently, database types that map to string and []byte are supported. Please
-// consult database-specific driver documentation for matching types.
-func (uuid *UUID) Scan(src interface{}) error {
- switch src := src.(type) {
- case nil:
- return nil
-
- case string:
- // if an empty UUID comes from a table, we return a null UUID
- if src == "" {
- return nil
- }
-
- // see Parse for required string format
- u, err := Parse(src)
- if err != nil {
- return fmt.Errorf("Scan: %v", err)
- }
-
- *uuid = u
-
- case []byte:
- // if an empty UUID comes from a table, we return a null UUID
- if len(src) == 0 {
- return nil
- }
-
- // assumes a simple slice of bytes if 16 bytes
- // otherwise attempts to parse
- if len(src) != 16 {
- return uuid.Scan(string(src))
- }
- copy((*uuid)[:], src)
-
- default:
- return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
- }
-
- return nil
-}
-
-// Value implements sql.Valuer so that UUIDs can be written to databases
-// transparently. Currently, UUIDs map to strings. Please consult
-// database-specific driver documentation for matching types.
-func (uuid UUID) Value() (driver.Value, error) {
- return uuid.String(), nil
-}
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
deleted file mode 100644
index e6ef06cd..00000000
--- a/vendor/github.com/google/uuid/time.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "encoding/binary"
- "sync"
- "time"
-)
-
-// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
-// 1582.
-type Time int64
-
-const (
- lillian = 2299160 // Julian day of 15 Oct 1582
- unix = 2440587 // Julian day of 1 Jan 1970
- epoch = unix - lillian // Days between epochs
- g1582 = epoch * 86400 // seconds between epochs
- g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
-)
-
-var (
- timeMu sync.Mutex
- lasttime uint64 // last time we returned
- clockSeq uint16 // clock sequence for this run
-
- timeNow = time.Now // for testing
-)
-
-// UnixTime converts t to the number of seconds and nanoseconds using the Unix
-// epoch of 1 Jan 1970.
-func (t Time) UnixTime() (sec, nsec int64) {
- sec = int64(t - g1582ns100)
- nsec = (sec % 10000000) * 100
- sec /= 10000000
- return sec, nsec
-}
-
-// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
-// clock sequence as well as adjusting the clock sequence as needed. An error
-// is returned if the current time cannot be determined.
-func GetTime() (Time, uint16, error) {
- defer timeMu.Unlock()
- timeMu.Lock()
- return getTime()
-}
-
-func getTime() (Time, uint16, error) {
- t := timeNow()
-
- // If we don't have a clock sequence already, set one.
- if clockSeq == 0 {
- setClockSequence(-1)
- }
- now := uint64(t.UnixNano()/100) + g1582ns100
-
- // If time has gone backwards with this clock sequence then we
- // increment the clock sequence
- if now <= lasttime {
- clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
- }
- lasttime = now
- return Time(now), clockSeq, nil
-}
-
-// ClockSequence returns the current clock sequence, generating one if not
-// already set. The clock sequence is only used for Version 1 UUIDs.
-//
-// The uuid package does not use global static storage for the clock sequence or
-// the last time a UUID was generated. Unless SetClockSequence is used, a new
-// random clock sequence is generated the first time a clock sequence is
-// requested by ClockSequence, GetTime, or NewUUID.
(section 4.2.1.1) -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clockSeq == 0 { - setClockSequence(-1) - } - return int(clockSeq & 0x3fff) -} - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - oldSeq := clockSeq - clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if oldSeq != clockSeq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. -func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) -} - -// ClockSequence returns the clock sequence encoded in uuid. -// The clock sequence is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) ClockSequence() int { - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff -} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go deleted file mode 100644 index 5ea6c737..00000000 --- a/vendor/github.com/google/uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts hex characters x1 and x2 into a byte. 
-func xtob(x1, x2 byte) (byte, bool) { - b1 := xvalues[x1] - b2 := xvalues[x2] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go deleted file mode 100644 index 524404cc..00000000 --- a/vendor/github.com/google/uuid/uuid.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID [16]byte - -// A Version represents a UUID's version. -type Version byte - -// A Variant represents a UUID's variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. -) - -var rander = rand.Reader // random function - -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. -func Parse(s string) (UUID, error) { - var uuid UUID - switch len(s) { - // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36: - - // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { - return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - case 36 + 2: - s = s[1:] - - // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - case 32: - var ok bool - for i := range uuid { - uuid[i], ok = xtob(s[i*2], s[i*2+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(s[x], s[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
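Parse, shown above, accepts four encodings of the same value; ParseBytes (next) mirrors it for byte slices. A small editor-added demonstration, not part of the vendored source:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// The four encodings Parse documents: canonical, URN, braced, raw hex.
	for _, s := range []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
		"6ba7b8109dad11d180b400c04fd430c8",
	} {
		id, err := uuid.Parse(s)
		fmt.Println(id, err) // all four should yield the same UUID
	}
}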
-func ParseBytes(b []byte) (UUID, error) {
- var uuid UUID
- switch len(b) {
- case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
- return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
- }
- b = b[9:]
- case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
- b = b[1:]
- case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- var ok bool
- for i := 0; i < 32; i += 2 {
- uuid[i/2], ok = xtob(b[i], b[i+1])
- if !ok {
- return uuid, errors.New("invalid UUID format")
- }
- }
- return uuid, nil
- default:
- return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
- }
- // b is now at least 36 bytes long
- // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
- return uuid, errors.New("invalid UUID format")
- }
- for i, x := range [16]int{
- 0, 2, 4, 6,
- 9, 11,
- 14, 16,
- 19, 21,
- 24, 26, 28, 30, 32, 34} {
- v, ok := xtob(b[x], b[x+1])
- if !ok {
- return uuid, errors.New("invalid UUID format")
- }
- uuid[i] = v
- }
- return uuid, nil
-}
-
-// MustParse is like Parse but panics if the string cannot be parsed.
-// It simplifies safe initialization of global variables holding compiled UUIDs.
-func MustParse(s string) UUID {
- uuid, err := Parse(s)
- if err != nil {
- panic(`uuid: Parse(` + s + `): ` + err.Error())
- }
- return uuid
-}
-
-// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
-// does not have a length of 16. The bytes are copied from the slice.
-func FromBytes(b []byte) (uuid UUID, err error) {
- err = uuid.UnmarshalBinary(b)
- return uuid, err
-}
-
-// Must returns uuid if err is nil and panics otherwise.
-func Must(uuid UUID, err error) UUID {
- if err != nil {
- panic(err)
- }
- return uuid
-}
-
-// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-// , or "" if uuid is invalid.
-func (uuid UUID) String() string {
- var buf [36]byte
- encodeHex(buf[:], uuid)
- return string(buf[:])
-}
-
-// URN returns the RFC 2141 URN form of uuid,
-// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
-func (uuid UUID) URN() string {
- var buf [36 + 9]byte
- copy(buf[:], "urn:uuid:")
- encodeHex(buf[9:], uuid)
- return string(buf[:])
-}
-
-func encodeHex(dst []byte, uuid UUID) {
- hex.Encode(dst, uuid[:4])
- dst[8] = '-'
- hex.Encode(dst[9:13], uuid[4:6])
- dst[13] = '-'
- hex.Encode(dst[14:18], uuid[6:8])
- dst[18] = '-'
- hex.Encode(dst[19:23], uuid[8:10])
- dst[23] = '-'
- hex.Encode(dst[24:], uuid[10:])
-}
-
-// Variant returns the variant encoded in uuid.
-func (uuid UUID) Variant() Variant {
- switch {
- case (uuid[8] & 0xc0) == 0x80:
- return RFC4122
- case (uuid[8] & 0xe0) == 0xc0:
- return Microsoft
- case (uuid[8] & 0xe0) == 0xe0:
- return Future
- default:
- return Reserved
- }
-}
-
-// Version returns the version of uuid.
-func (uuid UUID) Version() Version {
- return Version(uuid[6] >> 4)
-}
-
-func (v Version) String() string {
- if v > 15 {
- return fmt.Sprintf("BAD_VERSION_%d", v)
- }
- return fmt.Sprintf("VERSION_%d", v)
-}
-
-func (v Variant) String() string {
- switch v {
- case RFC4122:
- return "RFC4122"
- case Reserved:
- return "Reserved"
- case Microsoft:
- return "Microsoft"
- case Future:
- return "Future"
- case Invalid:
- return "Invalid"
- }
- return fmt.Sprintf("BadVariant%d", int(v))
-}
-
-// SetRand sets the random number generator to r, which implements io.Reader.
-// If r.Read returns an error when the package requests random data then
-// a panic will be issued.
-//
-// Calling SetRand with nil sets the random number generator to the default
-// generator.
-func SetRand(r io.Reader) {
- if r == nil {
- rander = rand.Reader
- return
- }
- rander = r
-}
diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go
deleted file mode 100644
index 46310962..00000000
--- a/vendor/github.com/google/uuid/version1.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "encoding/binary"
-)
-
-// NewUUID returns a Version 1 UUID based on the current NodeID and clock
-// sequence, and the current time. If the NodeID has not been set by SetNodeID
-// or SetNodeInterface then it will be set automatically. If the NodeID cannot
-// be set, NewUUID returns nil. If clock sequence has not been set by
-// SetClockSequence then it will be set automatically. If GetTime fails to
-// return the current time, NewUUID returns nil and an error.
-//
-// In most cases, New should be used.
-func NewUUID() (UUID, error) {
- var uuid UUID
- now, seq, err := GetTime()
- if err != nil {
- return uuid, err
- }
-
- timeLow := uint32(now & 0xffffffff)
- timeMid := uint16((now >> 32) & 0xffff)
- timeHi := uint16((now >> 48) & 0x0fff)
- timeHi |= 0x1000 // Version 1
-
- binary.BigEndian.PutUint32(uuid[0:], timeLow)
- binary.BigEndian.PutUint16(uuid[4:], timeMid)
- binary.BigEndian.PutUint16(uuid[6:], timeHi)
- binary.BigEndian.PutUint16(uuid[8:], seq)
-
- nodeMu.Lock()
- if nodeID == zeroID {
- setNodeInterface("")
- }
- copy(uuid[10:], nodeID[:])
- nodeMu.Unlock()
-
- return uuid, nil
-}
diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go
deleted file mode 100644
index c110465d..00000000
--- a/vendor/github.com/google/uuid/version4.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import "io"
-
-// New creates a new random UUID or panics. New is equivalent to
-// the expression
-//
-// uuid.Must(uuid.NewRandom())
-func New() UUID {
- return Must(NewRandom())
-}
-
-// NewRandom returns a Random (Version 4) UUID.
-//
-// The strength of the UUIDs is based on the strength of the crypto/rand
-// package.
-//
-// A note about uniqueness derived from the UUID Wikipedia entry:
-//
-// Randomly generated UUIDs have 122 random bits. One's annual risk of being
-// hit by a meteorite is estimated to be one chance in 17 billion, which
-// means the probability is about 0.00000000006 (6 × 10−11),
-// equivalent to the odds of creating a few tens of trillions of UUIDs in a
-// year and having one duplicate.
-func NewRandom() (UUID, error) {
- return NewRandomFromReader(rander)
-}
-
-// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
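NewRandomFromReader (whose body follows) is the hook SetRand builds on. One plausible use, sketched here by the editor and suitable for tests only because math/rand is not cryptographically secure, is generating reproducible Version 4 UUIDs from a seeded reader:

package main

import (
	"fmt"
	"math/rand"

	"github.com/google/uuid"
)

func main() {
	// *rand.Rand implements io.Reader, so a fixed seed gives a
	// deterministic, repeatable version-4 UUID.
	r := rand.New(rand.NewSource(1))
	id, err := uuid.NewRandomFromReader(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(id, id.Version(), id.Variant())
}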
-func NewRandomFromReader(r io.Reader) (UUID, error) { - var uuid UUID - _, err := io.ReadFull(r, uuid[:]) - if err != nil { - return Nil, err - } - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE deleted file mode 100644 index e87a115e..00000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. 
For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. 
such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. 
However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. 
Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md deleted file mode 100644 index 036e5313..00000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# cleanhttp - -Functions for accessing "clean" Go http.Client values - -------------- - -The Go standard library contains a default `http.Client` called -`http.DefaultClient`. It is a common idiom in Go code to start with -`http.DefaultClient` and tweak it as necessary, and in fact, this is -encouraged; from the `http` package documentation: - -> The Client's Transport typically has internal state (cached TCP connections), -so Clients should be reused instead of created as needed. Clients are safe for -concurrent use by multiple goroutines. - -Unfortunately, this is a shared value, and it is not uncommon for libraries to -assume that they are free to modify it at will. 
With enough dependencies, it
-can be very easy to encounter strange problems and race conditions due to
-manipulation of this shared value across libraries and goroutines (clients are
-safe for concurrent use, but writing values to the client struct itself is not
-protected).
-
-Making things worse is the fact that a bare `http.Client` will use a default
-`http.Transport` called `http.DefaultTransport`, which is another global value
-that behaves the same way. So it is simply not enough to replace
-`http.DefaultClient` with `&http.Client{}`.
-
-This repository provides some simple functions to get a "clean" `http.Client`
--- one that uses the same default values as the Go standard library, but
-returns a client that does not share any state with other clients.
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
deleted file mode 100644
index fe28d15b..00000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package cleanhttp
-
-import (
- "net"
- "net/http"
- "runtime"
- "time"
-)
-
-// DefaultTransport returns a new http.Transport with similar default values to
-// http.DefaultTransport, but with idle connections and keepalives disabled.
-func DefaultTransport() *http.Transport {
- transport := DefaultPooledTransport()
- transport.DisableKeepAlives = true
- transport.MaxIdleConnsPerHost = -1
- return transport
-}
-
-// DefaultPooledTransport returns a new http.Transport with similar default
-// values to http.DefaultTransport. Do not use this for transient transports as
-// it can leak file descriptors over time. Only use this for transports that
-// will be re-used for the same host(s).
-func DefaultPooledTransport() *http.Transport {
- transport := &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- DualStack: true,
- }).DialContext,
- MaxIdleConns: 100,
- IdleConnTimeout: 90 * time.Second,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- ForceAttemptHTTP2: true,
- MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
- }
- return transport
-}
-
-// DefaultClient returns a new http.Client with similar default values to
-// http.Client, but with a non-shared Transport, idle connections disabled, and
-// keepalives disabled.
-func DefaultClient() *http.Client {
- return &http.Client{
- Transport: DefaultTransport(),
- }
-}
-
-// DefaultPooledClient returns a new http.Client with similar default values to
-// http.Client, but with a shared Transport. Do not use this function for
-// transient clients as it can leak file descriptors over time. Only use this
-// for clients that will be re-used for the same host(s).
-func DefaultPooledClient() *http.Client {
- return &http.Client{
- Transport: DefaultPooledTransport(),
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
deleted file mode 100644
index 05841092..00000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Package cleanhttp offers convenience utilities for acquiring "clean"
-// http.Transport and http.Client structs.
-//
-// Values set on http.DefaultClient and http.DefaultTransport affect all
-// callers. This can have detrimental effects, especially in TLS contexts,
-// where client or root certificates set to talk to multiple endpoints can end
-// up displacing each other, leading to hard-to-debug issues. This package
-// provides non-shared http.Client and http.Transport structs to ensure that
-// the configuration will not be overwritten by other parts of the application
-// or dependencies.
-//
-// The DefaultClient and DefaultTransport functions disable idle connections
-// and keepalives. Without ensuring that idle connections are closed before
-// garbage collection, short-term clients/transports can leak file descriptors,
-// eventually leading to "too many open files" errors. If you will be
-// connecting to the same hosts repeatedly from the same client, you can use
-// DefaultPooledClient to receive a client that has connection pooling
-// semantics similar to http.DefaultClient.
-//
-package cleanhttp
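With doc.go above closing out the package's rationale, here is a short editor-added usage sketch (the URL is a placeholder): each call hands back a client with a private Transport, so per-caller tweaks cannot race with other users of http.DefaultClient.

package main

import (
	"fmt"
	"time"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func main() {
	// A pooled client for repeated calls to the same host; mutating it
	// touches no global state shared with other libraries.
	client := cleanhttp.DefaultPooledClient()
	client.Timeout = 10 * time.Second

	resp, err := client.Get("https://example.com/") // placeholder endpoint
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}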
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
deleted file mode 100644
index 3c845dc0..00000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package cleanhttp
-
-import (
- "net/http"
- "strings"
- "unicode"
-)
-
-// HandlerInput provides input options to cleanhttp's handlers
-type HandlerInput struct {
- ErrStatus int
-}
-
-// PrintablePathCheckHandler is a middleware that ensures the request path
-// contains only printable runes.
-func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler {
- // Nil-check on input to make it optional
- if input == nil {
- input = &HandlerInput{
- ErrStatus: http.StatusBadRequest,
- }
- }
-
- // Default to http.StatusBadRequest on error
- if input.ErrStatus == 0 {
- input.ErrStatus = http.StatusBadRequest
- }
-
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r != nil {
- // Check URL path for non-printable characters
- idx := strings.IndexFunc(r.URL.Path, func(c rune) bool {
- return !unicode.IsPrint(c)
- })
-
- if idx != -1 {
- w.WriteHeader(input.ErrStatus)
- return
- }
-
- if next != nil {
- next.ServeHTTP(w, r)
- }
- }
-
- return
- })
-}
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.gitignore b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore
deleted file mode 100644
index caab963a..00000000
--- a/vendor/github.com/hashicorp/go-retryablehttp/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-.idea/
-*.iml
-*.test
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml b/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml
deleted file mode 100644
index 2df4e7df..00000000
--- a/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-sudo: false
-
-language: go
-
-go:
- - 1.8.1
-
-branches:
- only:
- - master
-
-script: make updatedeps test
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE
deleted file mode 100644
index e87a115e..00000000
--- a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE
+++ /dev/null
@@ -1,363 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. "Contributor"
-
- means each individual or legal entity that creates, contributes to the
- creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
-
- means the combination of the Contributions of others (if any) used by a
- Contributor and that particular Contributor's Contribution.
-
-1.3.
"Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. 
Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. 
If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile deleted file mode 100644 index da17640e..00000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -default: test - -test: - go vet ./... - go test -race ./... - -updatedeps: - go get -f -t -u ./... - go get -f -u ./... - -.PHONY: default test updatedeps diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md deleted file mode 100644 index ccdc7e87..00000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/README.md +++ /dev/null @@ -1,46 +0,0 @@ -go-retryablehttp -================ - -[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis] -[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] - -[travis]: http://travis-ci.org/hashicorp/go-retryablehttp -[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp - -The `retryablehttp` package provides a familiar HTTP client interface with -automatic retries and exponential backoff. It is a thin wrapper over the -standard `net/http` client library and exposes nearly the same public API. This -makes `retryablehttp` very easy to drop into existing programs. - -`retryablehttp` performs automatic retries under certain conditions. Mainly, if -an error is returned by the client (connection errors, etc.), or if a 500-range -response code is received (except 501), then a retry is invoked after a wait -period. Otherwise, the response is returned and left to the caller to -interpret. - -The main difference from `net/http` is that requests which take a request body -(POST/PUT et. al) can have the body provided in a number of ways (some more or -less efficient) that allow "rewinding" the request body if the initial request -fails so that the full request can be attempted again. See the -[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more -details. 
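As a complement to the README above, here is a sketch of the rewindable-body behaviour it describes, using the package's own `NewRequest`, `NewClient`, and `Do` as vendored in this patch (the URL and JSON payload are illustrative, not from the original source):

```go
package main

import (
	"fmt"
	"log"

	retryablehttp "github.com/hashicorp/go-retryablehttp"
)

func main() {
	// A raw []byte body can be wrapped in a fresh reader on every
	// attempt, so the full request is re-sent if a retry is needed.
	payload := []byte(`{"name":"example"}`)

	req, err := retryablehttp.NewRequest("POST", "https://example.com/api", payload)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")

	// NewClient applies the default retry policy and exponential backoff.
	client := retryablehttp.NewClient()
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```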
- -Example Use -=========== - -Using this library should look almost identical to what you would do with -`net/http`. The simplest example of a GET request is shown below: - -```go -resp, err := retryablehttp.Get("/foo") -if err != nil { - panic(err) -} -``` - -The returned response object is an `*http.Response`, the same thing you would -usually get from `net/http`. If the request fails one or more times, the above -call blocks and retries with exponential backoff. - -For more usage and examples see the -[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp). diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go deleted file mode 100644 index 15f1e885..00000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/client.go +++ /dev/null @@ -1,528 +0,0 @@ -// The retryablehttp package provides a familiar HTTP client interface with -// automatic retries and exponential backoff. It is a thin wrapper over the -// standard net/http client library and exposes nearly the same public API. -// This makes retryablehttp very easy to drop into existing programs. -// -// retryablehttp performs automatic retries under certain conditions. Mainly, if -// an error is returned by the client (connection errors, etc.), or if a 500-range -// response is received, then a retry is invoked. Otherwise, the response is -// returned and left to the caller to interpret. -// -// Requests which take a request body should provide a non-nil function -// parameter. The best choice is to provide either a function satisfying -// ReaderFunc which provides multiple io.Readers in an efficient manner, a -// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte -// slice. As it is a reference type, and we will wrap it as needed by readers, -// we can efficiently re-use the request body without needing to copy it. If an -// io.Reader (such as a *bytes.Reader) is provided, the full body will be read -// prior to the first request, and will be efficiently re-used for any retries. -// ReadSeeker can be used, but some users have observed occasional data races -// between the net/http library and the Seek functionality of some -// implementations of ReadSeeker, so it should be avoided if possible. -package retryablehttp - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "math/rand" - "net/http" - "net/url" - "os" - "strings" - "time" - - cleanhttp "github.com/hashicorp/go-cleanhttp" -) - -var ( - // Default retry configuration - defaultRetryWaitMin = 1 * time.Second - defaultRetryWaitMax = 30 * time.Second - defaultRetryMax = 4 - - // defaultClient is used for performing requests without explicitly making - // a new client. It is purposely private to avoid modifications. - defaultClient = NewClient() - - // We need to consume response bodies to maintain http connections, but - // limit the size we consume to respReadLimit. - respReadLimit = int64(4096) -) - -// ReaderFunc is the type of function that can be given natively to NewRequest -type ReaderFunc func() (io.Reader, error) - -// LenReader is an interface implemented by many in-memory io.Readers. Used -// for automatically sending the right Content-Length header when possible. -type LenReader interface { - Len() int -} - -// Request wraps the metadata needed to create HTTP requests. -type Request struct { - // body is a seekable reader over the request body payload. This is - // used to rewind the request data in between retries.
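// A ReaderFunc supplier can be as simple as the following illustrative
// sketch (not from the original source); each retry calls the function
// again to obtain a fresh reader over the same payload:
//
//	func() (io.Reader, error) { return strings.NewReader("payload"), nil }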
- body ReaderFunc - - // Embed an HTTP request directly. This makes a *Request act exactly - // like an *http.Request so that all meta methods are supported. - *http.Request -} - -// WithContext returns wrapped Request with a shallow copy of underlying *http.Request -// with its context changed to ctx. The provided ctx must be non-nil. -func (r *Request) WithContext(ctx context.Context) *Request { - r.Request = r.Request.WithContext(ctx) - return r -} - -// BodyBytes allows accessing the request body. It is an analogue to -// http.Request's Body variable, but it returns a copy of the underlying data -// rather than consuming it. -// -// This function is not thread-safe; do not call it at the same time as another -// call, or at the same time this request is being used with Client.Do. -func (r *Request) BodyBytes() ([]byte, error) { - if r.body == nil { - return nil, nil - } - body, err := r.body() - if err != nil { - return nil, err - } - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(body) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// NewRequest creates a new wrapped request. -func NewRequest(method, url string, rawBody interface{}) (*Request, error) { - var err error - var body ReaderFunc - var contentLength int64 - - if rawBody != nil { - switch rawBody.(type) { - // If they gave us a function already, great! Use it. - case ReaderFunc: - body = rawBody.(ReaderFunc) - tmp, err := body() - if err != nil { - return nil, err - } - if lr, ok := tmp.(LenReader); ok { - contentLength = int64(lr.Len()) - } - if c, ok := tmp.(io.Closer); ok { - c.Close() - } - - case func() (io.Reader, error): - body = rawBody.(func() (io.Reader, error)) - tmp, err := body() - if err != nil { - return nil, err - } - if lr, ok := tmp.(LenReader); ok { - contentLength = int64(lr.Len()) - } - if c, ok := tmp.(io.Closer); ok { - c.Close() - } - - // If a regular byte slice, we can read it over and over via new - // readers - case []byte: - buf := rawBody.([]byte) - body = func() (io.Reader, error) { - return bytes.NewReader(buf), nil - } - contentLength = int64(len(buf)) - - // If a bytes.Buffer we can read the underlying byte slice over and - // over - case *bytes.Buffer: - buf := rawBody.(*bytes.Buffer) - body = func() (io.Reader, error) { - return bytes.NewReader(buf.Bytes()), nil - } - contentLength = int64(buf.Len()) - - // We prioritize *bytes.Reader here because we don't really want to - // deal with it seeking so want it to match here instead of the - // io.ReadSeeker case. 
- case *bytes.Reader: - buf, err := ioutil.ReadAll(rawBody.(*bytes.Reader)) - if err != nil { - return nil, err - } - body = func() (io.Reader, error) { - return bytes.NewReader(buf), nil - } - contentLength = int64(len(buf)) - - // Compat case - case io.ReadSeeker: - raw := rawBody.(io.ReadSeeker) - body = func() (io.Reader, error) { - raw.Seek(0, 0) - return ioutil.NopCloser(raw), nil - } - if lr, ok := raw.(LenReader); ok { - contentLength = int64(lr.Len()) - } - - // Read all in so we can reset - case io.Reader: - buf, err := ioutil.ReadAll(rawBody.(io.Reader)) - if err != nil { - return nil, err - } - body = func() (io.Reader, error) { - return bytes.NewReader(buf), nil - } - contentLength = int64(len(buf)) - - default: - return nil, fmt.Errorf("cannot handle type %T", rawBody) - } - } - - httpReq, err := http.NewRequest(method, url, nil) - if err != nil { - return nil, err - } - httpReq.ContentLength = contentLength - - return &Request{body, httpReq}, nil -} - -// Logger interface allows the use of loggers other than the -// standard log.Logger. -type Logger interface { - Printf(string, ...interface{}) -} - -// RequestLogHook allows a function to run before each retry. The HTTP -// request which will be made, and the retry number (0 for the initial -// request) are available to users. The internal logger is exposed to -// consumers. -type RequestLogHook func(Logger, *http.Request, int) - -// ResponseLogHook is like RequestLogHook, but allows running a function -// on each HTTP response. This function will be invoked at the end of -// every HTTP request executed, regardless of whether a subsequent retry -// needs to be performed or not. If the response body is read or closed -// from this method, this will affect the response returned from Do(). -type ResponseLogHook func(Logger, *http.Response) - -// CheckRetry specifies a policy for handling retries. It is called -// following each request with the response and error values returned by -// the http.Client. If CheckRetry returns false, the Client stops retrying -// and returns the response to the caller. If CheckRetry returns an error, -// that error value is returned in lieu of the error from the request. The -// Client will close any response body when retrying, but if the retry is -// aborted it is up to the CheckRetry callback to properly close any -// response body before returning. -type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error) - -// Backoff specifies a policy for how long to wait between retries. -// It is called after a failing request to determine the amount of time -// that should pass before trying again. -type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration - -// ErrorHandler is called if retries are exhausted, containing the last status -// from the http library. If not specified, default behavior for the library is -// to close the body and return an error indicating how many tries were -// attempted. If overriding this, be sure to close the body if needed. -type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error) - -// Client is used to make HTTP requests. It adds additional functionality -// like automatic retries to tolerate minor outages. -type Client struct { - HTTPClient *http.Client // Internal HTTP client. - Logger Logger // Custom logger instance.
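// The fields below control retry timing; a sketch of typical overrides
// (the values here are illustrative, not defaults from this package):
//
//	c := NewClient()
//	c.RetryMax = 10
//	c.RetryWaitMax = 5 * time.Minute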
- - RetryWaitMin time.Duration // Minimum time to wait - RetryWaitMax time.Duration // Maximum time to wait - RetryMax int // Maximum number of retries - - // RequestLogHook allows a user-supplied function to be called - // before each retry. - RequestLogHook RequestLogHook - - // ResponseLogHook allows a user-supplied function to be called - // with the response from each HTTP request executed. - ResponseLogHook ResponseLogHook - - // CheckRetry specifies the policy for handling retries, and is called - // after each request. The default policy is DefaultRetryPolicy. - CheckRetry CheckRetry - - // Backoff specifies the policy for how long to wait between retries. - Backoff Backoff - - // ErrorHandler specifies the custom error handler to use, if any. - ErrorHandler ErrorHandler -} - -// NewClient creates a new Client with default settings. -func NewClient() *Client { - return &Client{ - HTTPClient: cleanhttp.DefaultClient(), - Logger: log.New(os.Stderr, "", log.LstdFlags), - RetryWaitMin: defaultRetryWaitMin, - RetryWaitMax: defaultRetryWaitMax, - RetryMax: defaultRetryMax, - CheckRetry: DefaultRetryPolicy, - Backoff: DefaultBackoff, - } -} - -// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which -// will retry on connection errors and server errors. -func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { - // do not retry on context.Canceled or context.DeadlineExceeded - if ctx.Err() != nil { - return false, ctx.Err() - } - - if err != nil { - return true, err - } - // Check the response code. We retry on 500-range responses to allow - // the server time to recover, as 500's are typically not permanent - // errors and may relate to outages on the server side. This will catch - // invalid response codes as well, like 0 and 999. - if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) { - return true, nil - } - - return false, nil -} - -// DefaultBackoff provides a default callback for Client.Backoff which -// will perform exponential backoff based on the attempt number and limited -// by the provided minimum and maximum durations. -func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { - mult := math.Pow(2, float64(attemptNum)) * float64(min) - sleep := time.Duration(mult) - if float64(sleep) != mult || sleep > max { - sleep = max - } - return sleep -} - -// LinearJitterBackoff provides a callback for Client.Backoff which will -// perform linear backoff based on the attempt number and with jitter to -// prevent a thundering herd. -// -// min and max here are *not* absolute values. The number to be multiplied by -// the attempt number will be chosen at random from between them, thus they are -// bounding the jitter. -// -// For instance: -// * To get strictly linear backoff of one second increasing each retry, set -// both to one second (1s, 2s, 3s, 4s, ...) -// * To get a small amount of jitter centered around one second increasing each -// retry, set to around one second, such as a min of 800ms and max of 1200ms -// (892ms, 2102ms, 2945ms, 4312ms, ...) -// * To get extreme jitter, set to a very wide spread, such as a min of 100ms -// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...)
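// A worked example of the arithmetic below, assuming min = 800ms and
// max = 1200ms: each call picks a random duration in [800ms, 1200ms) and
// multiplies it by the 1-based attempt number, giving roughly 0.8-1.2s for
// the first wait, 1.6-2.4s for the second, 2.4-3.6s for the third, and so on.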
-func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { - // attemptNum always starts at zero but we want to start at 1 for multiplication - attemptNum++ - - if max <= min { - // max is not above min (or they are equal), so return min * - // attemptNum - return min * time.Duration(attemptNum) - } - - // Seed rand; doing this every time is fine - rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) - - // Pick a random number that lies somewhere between the min and max and - // multiply by the attemptNum. attemptNum starts at zero so we always - // increment here. We first get a random percentage, then apply that to the - // difference between min and max, and add to min. - jitter := rand.Float64() * float64(max-min) - jitterMin := int64(jitter) + int64(min) - return time.Duration(jitterMin * int64(attemptNum)) -} - -// PassthroughErrorHandler is an ErrorHandler that directly passes through the -// values from the net/http library for the final request. The body is not -// closed. -func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) { - return resp, err -} - -// Do wraps calling an HTTP method with retries. -func (c *Client) Do(req *Request) (*http.Response, error) { - if c.Logger != nil { - c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL) - } - - var resp *http.Response - var err error - - for i := 0; ; i++ { - var code int // HTTP response code - - // Always rewind the request body when non-nil. - if req.body != nil { - body, err := req.body() - if err != nil { - return resp, err - } - if c, ok := body.(io.ReadCloser); ok { - req.Request.Body = c - } else { - req.Request.Body = ioutil.NopCloser(body) - } - } - - if c.RequestLogHook != nil { - c.RequestLogHook(c.Logger, req.Request, i) - } - - // Attempt the request - resp, err = c.HTTPClient.Do(req.Request) - if resp != nil { - code = resp.StatusCode - } - - // Check if we should continue with retries. - checkOK, checkErr := c.CheckRetry(req.Request.Context(), resp, err) - - if err != nil { - if c.Logger != nil { - c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) - } - } else { - // Call this here to maintain the behavior of logging all requests, - // even if CheckRetry signals to stop. - if c.ResponseLogHook != nil { - // Call the response logger function if provided. - c.ResponseLogHook(c.Logger, resp) - } - } - - // Now decide if we should continue. - if !checkOK { - if checkErr != nil { - err = checkErr - } - return resp, err - } - - // We do this before drainBody because there's no need for the I/O if - // we're breaking out - remain := c.RetryMax - i - if remain <= 0 { - break - } - - // We're going to retry, consume any response to reuse the connection.
- if err == nil && resp != nil { - c.drainBody(resp.Body) - } - - wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) - desc := fmt.Sprintf("%s %s", req.Method, req.URL) - if code > 0 { - desc = fmt.Sprintf("%s (status: %d)", desc, code) - } - if c.Logger != nil { - c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) - } - time.Sleep(wait) - } - - if c.ErrorHandler != nil { - return c.ErrorHandler(resp, err, c.RetryMax+1) - } - - // By default, we close the response body and return an error without - // returning the response - if resp != nil { - resp.Body.Close() - } - return nil, fmt.Errorf("%s %s giving up after %d attempts", - req.Method, req.URL, c.RetryMax+1) -} - -// Try to read the response body so we can reuse this connection. -func (c *Client) drainBody(body io.ReadCloser) { - defer body.Close() - _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) - if err != nil { - if c.Logger != nil { - c.Logger.Printf("[ERR] error reading response body: %v", err) - } - } -} - -// Get is a shortcut for doing a GET request without making a new client. -func Get(url string) (*http.Response, error) { - return defaultClient.Get(url) -} - -// Get is a convenience helper for doing simple GET requests. -func (c *Client) Get(url string) (*http.Response, error) { - req, err := NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return c.Do(req) -} - -// Head is a shortcut for doing a HEAD request without making a new client. -func Head(url string) (*http.Response, error) { - return defaultClient.Head(url) -} - -// Head is a convenience method for doing simple HEAD requests. -func (c *Client) Head(url string) (*http.Response, error) { - req, err := NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return c.Do(req) -} - -// Post is a shortcut for doing a POST request without making a new client. -func Post(url, bodyType string, body interface{}) (*http.Response, error) { - return defaultClient.Post(url, bodyType, body) -} - -// Post is a convenience method for doing simple POST requests. -func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) { - req, err := NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return c.Do(req) -} - -// PostForm is a shortcut to perform a POST with form data without creating -// a new client. -func PostForm(url string, data url.Values) (*http.Response, error) { - return defaultClient.PostForm(url, data) -} - -// PostForm is a convenience method for doing simple POST operations using -// pre-filled url.Values form data. 
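// A usage sketch, assuming a client built with NewClient() (the URL and
// form values are illustrative, not from the original source):
//
//	resp, err := client.PostForm("https://example.com/submit",
//		url.Values{"q": []string{"golang"}})
//
// This is shorthand for a Post with the content type
// "application/x-www-form-urlencoded" and the encoded form as the body.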
-func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { - return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore deleted file mode 100644 index 15586a2b..00000000 --- a/vendor/github.com/hashicorp/hcl/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -y.output - -# ignore intellij files -.idea -*.iml -*.ipr -*.iws - -*.test diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml deleted file mode 100644 index cb63a321..00000000 --- a/vendor/github.com/hashicorp/hcl/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -sudo: false - -language: go - -go: - - 1.x - - tip - -branches: - only: - - master - -script: make test diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE deleted file mode 100644 index c33dcc7c..00000000 --- a/vendor/github.com/hashicorp/hcl/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. 
“Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. 
You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. 
The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. 
Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile deleted file mode 100644 index 84fd743f..00000000 --- a/vendor/github.com/hashicorp/hcl/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -TEST?=./... - -default: test - -fmt: generate - go fmt ./... - -test: generate - go get -t ./... - go test $(TEST) $(TESTARGS) - -generate: - go generate ./... - -updatedeps: - go get -u golang.org/x/tools/cmd/stringer - -.PHONY: default generate test updatedeps diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md deleted file mode 100644 index c8223326..00000000 --- a/vendor/github.com/hashicorp/hcl/README.md +++ /dev/null @@ -1,125 +0,0 @@ -# HCL - -[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) - -HCL (HashiCorp Configuration Language) is a configuration language built -by HashiCorp. 
The goal of HCL is to build a structured configuration language -that is both human and machine friendly for use with command-line tools, but -specifically targeted towards DevOps tools, servers, etc. - -HCL is also fully JSON compatible. That is, JSON can be used as completely -valid input to a system expecting HCL. This helps make systems -interoperable with other systems. - -HCL is heavily inspired by -[libucl](https://github.com/vstakhov/libucl), -nginx configuration, and other similar languages. - -## Why? - -A common question about HCL is: why not -JSON, YAML, etc.? - -Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) -used a variety of configuration languages from full programming languages -such as Ruby to complete data structure languages such as JSON. What we -learned is that some people wanted human-friendly configuration languages -and some people wanted machine-friendly languages. - -JSON fits a nice balance in this, but is fairly verbose and most -importantly doesn't support comments. With YAML, we found that beginners -had a really hard time determining what the actual structure was, and -ended up guessing more often than not whether to use a hyphen, colon, etc. -in order to represent some configuration key. - -Full programming languages such as Ruby enable complex behavior -a configuration language shouldn't usually allow, and also force -people to learn some Ruby. - -Because of this, we decided to create our own configuration language -that is JSON-compatible. Our configuration language (HCL) is designed -to be written and modified by humans. The API for HCL allows JSON -as an input so that it is also machine-friendly (machines can generate -JSON instead of trying to generate HCL). - -Our goal with HCL is not to alienate other configuration languages. -It is instead to provide HCL as a specialized language for our tools, -and JSON as the interoperability layer. - -## Syntax - -For a complete grammar, please see the parser itself. A high-level overview -of the syntax and grammar is listed here. - - * Single line comments start with `#` or `//` - - * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments - are not allowed. A multi-line comment (also known as a block comment) - terminates at the first `*/` found. - - * Values are assigned with the syntax `key = value` (whitespace doesn't - matter). The value can be any primitive: a string, number, boolean, - object, or list. - - * Strings are double-quoted and can contain any UTF-8 characters. - Example: `"Hello, World"` - - * Multi-line strings start with `<- - echo %Path% - - go version - - go env - - go get -t ./... - -build_script: -- cmd: go test -v ./... diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go deleted file mode 100644 index bed9ebbe..00000000 --- a/vendor/github.com/hashicorp/hcl/decoder.go +++ /dev/null @@ -1,729 +0,0 @@ -package hcl - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" - "github.com/hashicorp/hcl/hcl/token" -) - -// This is the tag to use with structures to have settings for HCL -const tagName = "hcl" - -var ( - // nodeType holds a reference to the type of ast.Node - nodeType reflect.Type = findNodeType() -) - -// Unmarshal accepts a byte slice as input and writes the -// data to the value pointed to by v.
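// A minimal usage sketch (the struct and input are illustrative, not from
// the original source):
//
//	var cfg struct {
//		Name string `hcl:"name"`
//	}
//	input := `name = "example"`
//	err := hcl.Unmarshal([]byte(input), &cfg)
//	// on success, cfg.Name == "example"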
-func Unmarshal(bs []byte, v interface{}) error { - root, err := parse(bs) - if err != nil { - return err - } - - return DecodeObject(v, root) -} - -// Decode reads the given input and decodes it into the structure -// given by `out`. -func Decode(out interface{}, in string) error { - obj, err := Parse(in) - if err != nil { - return err - } - - return DecodeObject(out, obj) -} - -// DecodeObject is a lower-level version of Decode. It decodes a -// raw Object into the given output. -func DecodeObject(out interface{}, n ast.Node) error { - val := reflect.ValueOf(out) - if val.Kind() != reflect.Ptr { - return errors.New("result must be a pointer") - } - - // If we have the file, we really decode the root node - if f, ok := n.(*ast.File); ok { - n = f.Node - } - - var d decoder - return d.decode("root", n, val.Elem()) -} - -type decoder struct { - stack []reflect.Kind -} - -func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { - k := result - - // If we have an interface with a valid value, we use that - // for the check. - if result.Kind() == reflect.Interface { - elem := result.Elem() - if elem.IsValid() { - k = elem - } - } - - // Push current onto stack unless it is an interface. - if k.Kind() != reflect.Interface { - d.stack = append(d.stack, k.Kind()) - - // Schedule a pop - defer func() { - d.stack = d.stack[:len(d.stack)-1] - }() - } - - switch k.Kind() { - case reflect.Bool: - return d.decodeBool(name, node, result) - case reflect.Float32, reflect.Float64: - return d.decodeFloat(name, node, result) - case reflect.Int, reflect.Int32, reflect.Int64: - return d.decodeInt(name, node, result) - case reflect.Interface: - // When we see an interface, we make our own thing - return d.decodeInterface(name, node, result) - case reflect.Map: - return d.decodeMap(name, node, result) - case reflect.Ptr: - return d.decodePtr(name, node, result) - case reflect.Slice: - return d.decodeSlice(name, node, result) - case reflect.String: - return d.decodeString(name, node, result) - case reflect.Struct: - return d.decodeStruct(name, node, result) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), - } - } -} - -func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.BOOL { - v, err := strconv.ParseBool(n.Token.Text) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v)) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { - v, err := strconv.ParseFloat(n.Token.Text, 64) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - v, err := strconv.ParseInt(n.Token.Text, 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - case 
token.STRING: - v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { - // When we see an ast.Node, we retain the value to enable deferred decoding. - // Very useful in situations where we want to preserve ast.Node information - // like Pos - if result.Type() == nodeType && result.CanSet() { - result.Set(reflect.ValueOf(node)) - return nil - } - - var set reflect.Value - redecode := true - - // For testing types, ObjectType should just be treated as a list. We - // set this to a temporary var because we want to pass in the real node. - testNode := node - if ot, ok := node.(*ast.ObjectType); ok { - testNode = ot.List - } - - switch n := testNode.(type) { - case *ast.ObjectList: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) - set = result - } - case *ast.ObjectType: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 1) - set = result - } - case *ast.ListType: - var temp []interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 0) - set = result - case *ast.LiteralType: - switch n.Token.Type { - case token.BOOL: - var result bool - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.FLOAT: - var result float64 - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.NUMBER: - var result int - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.STRING, token.HEREDOC: - set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), - } - } - default: - return fmt.Errorf( - "%s: cannot decode into interface: %T", - name, node) - } - - // Set the result to what it's supposed to be, then reset - // result so we don't reflect into this method anymore. - result.Set(set) - - if redecode { - // Revisit the node so that we can use the newly instantiated - // thing and populate it.
- if err := d.decode(name, node, result); err != nil { - return err - } - } - - return nil -} - -func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { - if item, ok := node.(*ast.ObjectItem); ok { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - n, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), - } - } - - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - - resultType := result.Type() - resultElemType := resultType.Elem() - resultKeyType := resultType.Key() - if resultKeyType.Kind() != reflect.String { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Make a map if it is nil - resultMap := result - if result.IsNil() { - resultMap = reflect.MakeMap( - reflect.MapOf(resultKeyType, resultElemType)) - } - - // Go through each element and decode it. - done := make(map[string]struct{}) - for _, item := range n.Items { - if item.Val == nil { - continue - } - - // github.com/hashicorp/terraform/issue/5740 - if len(item.Keys) == 0 { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Get the key we're dealing with, which is the first item - keyStr := item.Keys[0].Token.Value().(string) - - // If we've already processed this key, then ignore it - if _, ok := done[keyStr]; ok { - continue - } - - // Determine the value. If we have more than one key, then we - // get the objectlist of only these keys. - itemVal := item.Val - if len(item.Keys) > 1 { - itemVal = n.Filter(keyStr) - done[keyStr] = struct{}{} - } - - // Make the field name - fieldName := fmt.Sprintf("%s.%s", name, keyStr) - - // Get the key/value as reflection values - key := reflect.ValueOf(keyStr) - val := reflect.Indirect(reflect.New(resultElemType)) - - // If we have a pre-existing value in the map, use that - oldVal := resultMap.MapIndex(key) - if oldVal.IsValid() { - val.Set(oldVal) - } - - // Decode! - if err := d.decode(fieldName, itemVal, val); err != nil { - return err - } - - // Set the value on the map - resultMap.SetMapIndex(key, val) - } - - // Set the final map if we can - set.Set(resultMap) - return nil -} - -func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
- resultType := result.Type() - resultElemType := resultType.Elem() - val := reflect.New(resultElemType) - if err := d.decode(name, node, reflect.Indirect(val)); err != nil { - return err - } - - result.Set(val) - return nil -} - -func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - // Create the slice if it isn't nil - resultType := result.Type() - resultElemType := resultType.Elem() - if result.IsNil() { - resultSliceType := reflect.SliceOf(resultElemType) - result = reflect.MakeSlice( - resultSliceType, 0, 0) - } - - // Figure out the items we'll be copying into the slice - var items []ast.Node - switch n := node.(type) { - case *ast.ObjectList: - items = make([]ast.Node, len(n.Items)) - for i, item := range n.Items { - items[i] = item - } - case *ast.ObjectType: - items = []ast.Node{n} - case *ast.ListType: - items = n.List - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("unknown slice type: %T", node), - } - } - - for i, item := range items { - fieldName := fmt.Sprintf("%s[%d]", name, i) - - // Decode - val := reflect.Indirect(reflect.New(resultElemType)) - - // if item is an object that was decoded from ambiguous JSON and - // flattened, make sure it's expanded if it needs to decode into a - // defined structure. - item := expandObject(item, val) - - if err := d.decode(fieldName, item, val); err != nil { - return err - } - - // Append it onto the slice - result = reflect.Append(result, val) - } - - set.Set(result) - return nil -} - -// expandObject detects if an ambiguous JSON object was flattened to a List which -// should be decoded into a struct, and expands the ast to properly deocode. -func expandObject(node ast.Node, result reflect.Value) ast.Node { - item, ok := node.(*ast.ObjectItem) - if !ok { - return node - } - - elemType := result.Type() - - // our target type must be a struct - switch elemType.Kind() { - case reflect.Ptr: - switch elemType.Elem().Kind() { - case reflect.Struct: - //OK - default: - return node - } - case reflect.Struct: - //OK - default: - return node - } - - // A list value will have a key and field name. If it had more fields, - // it wouldn't have been flattened. 
- if len(item.Keys) != 2 { - return node - } - - keyToken := item.Keys[0].Token - item.Keys = item.Keys[1:] - - // we need to un-flatten the ast enough to decode - newNode := &ast.ObjectItem{ - Keys: []*ast.ObjectKey{ - &ast.ObjectKey{ - Token: keyToken, - }, - }, - Val: &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{item}, - }, - }, - } - - return newNode -} - -func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) - return nil - case token.STRING, token.HEREDOC: - result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type for string %T", name, node), - } -} - -func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { - var item *ast.ObjectItem - if it, ok := node.(*ast.ObjectItem); ok { - item = it - node = it.Val - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - // Handle the special case where the object itself is a literal. Previously - // the yacc parser would always ensure top-level elements were arrays. The new - // parser does not make the same guarantees, thus we need to convert any - // top-level literal elements into a list. - if _, ok := node.(*ast.LiteralType); ok && item != nil { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - list, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), - } - } - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = result - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - type field struct { - field reflect.StructField - val reflect.Value - } - fields := []field{} - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") - - // Ignore fields with tag name "-" - if tagParts[0] == "-" { - continue - } - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unsupported type to struct: %s", - fieldType.Name, fieldKind), - } - } - - // We have an embedded field. We "squash" the fields down - // if specified in the tag. 
- squash := false - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - structs = append( - structs, result.FieldByName(fieldType.Name)) - continue - } - } - - // Normal struct field, store it away - fields = append(fields, field{fieldType, structVal.Field(i)}) - } - } - - usedKeys := make(map[string]struct{}) - decodedFields := make([]string, 0, len(fields)) - decodedFieldsVal := make([]reflect.Value, 0) - unusedKeysVal := make([]reflect.Value, 0) - for _, f := range fields { - field, fieldValue := f.field, f.val - if !fieldValue.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !fieldValue.CanSet() { - continue - } - - fieldName := field.Name - - tagValue := field.Tag.Get(tagName) - tagParts := strings.SplitN(tagValue, ",", 2) - if len(tagParts) >= 2 { - switch tagParts[1] { - case "decodedFields": - decodedFieldsVal = append(decodedFieldsVal, fieldValue) - continue - case "key": - if item == nil { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: %s asked for 'key', impossible", - name, fieldName), - } - } - - fieldValue.SetString(item.Keys[0].Token.Value().(string)) - continue - case "unusedKeys": - unusedKeysVal = append(unusedKeysVal, fieldValue) - continue - } - } - - if tagParts[0] != "" { - fieldName = tagParts[0] - } - - // Determine the element we'll use to decode. If it is a single - // match (only object with the field), then we decode it exactly. - // If it is a prefix match, then we decode the matches. - filter := list.Filter(fieldName) - - prefixMatches := filter.Children() - matches := filter.Elem() - if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { - continue - } - - // Track the used key - usedKeys[fieldName] = struct{}{} - - // Create the field name and decode. We range over the elements - // because we actually want the value. - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - if len(prefixMatches.Items) > 0 { - if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { - return err - } - } - for _, match := range matches.Items { - var decodeNode ast.Node = match.Val - if ot, ok := decodeNode.(*ast.ObjectType); ok { - decodeNode = &ast.ObjectList{Items: ot.List.Items} - } - - if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { - return err - } - } - - decodedFields = append(decodedFields, field.Name) - } - - if len(decodedFieldsVal) > 0 { - // Sort it so that it is deterministic - sort.Strings(decodedFields) - - for _, v := range decodedFieldsVal { - v.Set(reflect.ValueOf(decodedFields)) - } - } - - return nil -} - -// findNodeType returns the type of ast.Node -func findNodeType() reflect.Type { - var nodeContainer struct { - Node ast.Node - } - value := reflect.ValueOf(nodeContainer).FieldByName("Node") - return value.Type() -} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go deleted file mode 100644 index 575a20b5..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package hcl decodes HCL into usable Go structures. -// -// hcl input can come in either pure HCL format or JSON format. -// It can be parsed into an AST, and then decoded into a structure, -// or it can be decoded directly from a string into a structure. 
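
Illustrative aside, not part of the patch: the decoder deleted above is easier to review with its public entry point in mind. A minimal sketch of driving it through the vendored v1 API (hcl.Decode plus the `hcl:",key"` and squash-style tags handled by decodeStruct); the Config/Service types and the input here are hypothetical:

	package main

	import (
		"fmt"
		"log"

		"github.com/hashicorp/hcl"
	)

	type Service struct {
		Name string `hcl:",key"` // filled from the block label via the "key" tag
		Port int    `hcl:"port"`
	}

	type Config struct {
		Region   string    `hcl:"region"`
		Services []Service `hcl:"service"`
	}

	func main() {
		const input = `
	region = "us-east-1"

	service "web" {
	  port = 8080
	}

	service "db" {
	  port = 5432
	}
	`
		var cfg Config
		// decodeStruct matches fields by tag; repeated blocks land in the
		// slice via decodeSlice, and each block label fills the ",key" field.
		if err := hcl.Decode(&cfg, input); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%+v\n", cfg)
	}
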
-//
-// If you choose to parse HCL into a raw AST, the benefit is that you
-// can write custom visitor implementations to implement custom
-// semantic checks. By default, HCL does not perform any semantic
-// checks.
-package hcl
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
deleted file mode 100644
index 6e5ef654..00000000
--- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Package ast declares the types used to represent syntax trees for HCL
-// (HashiCorp Configuration Language)
-package ast
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/hashicorp/hcl/hcl/token"
-)
-
-// Node is an element in the abstract syntax tree.
-type Node interface {
-	node()
-	Pos() token.Pos
-}
-
-func (File) node()         {}
-func (ObjectList) node()   {}
-func (ObjectKey) node()    {}
-func (ObjectItem) node()   {}
-func (Comment) node()      {}
-func (CommentGroup) node() {}
-func (ObjectType) node()   {}
-func (LiteralType) node()  {}
-func (ListType) node()     {}
-
-// File represents a single HCL file
-type File struct {
-	Node     Node            // usually a *ObjectList
-	Comments []*CommentGroup // list of all comments in the source
-}
-
-func (f *File) Pos() token.Pos {
-	return f.Node.Pos()
-}
-
-// ObjectList represents a list of ObjectItems. An HCL file itself is an
-// ObjectList.
-type ObjectList struct {
-	Items []*ObjectItem
-}
-
-func (o *ObjectList) Add(item *ObjectItem) {
-	o.Items = append(o.Items, item)
-}
-
-// Filter filters out the objects with the given key list as a prefix.
-//
-// The returned list of objects contains ObjectItems where the keys have
-// this prefix already stripped off. This might result in objects with
-// zero-length key lists if they have no children.
-//
-// If no matches are found, an empty ObjectList (non-nil) is returned.
-func (o *ObjectList) Filter(keys ...string) *ObjectList {
-	var result ObjectList
-	for _, item := range o.Items {
-		// If there aren't enough keys, then ignore this
-		if len(item.Keys) < len(keys) {
-			continue
-		}
-
-		match := true
-		for i, key := range item.Keys[:len(keys)] {
-			key := key.Token.Value().(string)
-			if key != keys[i] && !strings.EqualFold(key, keys[i]) {
-				match = false
-				break
-			}
-		}
-		if !match {
-			continue
-		}
-
-		// Strip off the prefix from the children
-		newItem := *item
-		newItem.Keys = newItem.Keys[len(keys):]
-		result.Add(&newItem)
-	}
-
-	return &result
-}
-
-// Children returns further nested objects (key length > 0) within this
-// ObjectList. This should be used with Filter to get at child items.
-func (o *ObjectList) Children() *ObjectList {
-	var result ObjectList
-	for _, item := range o.Items {
-		if len(item.Keys) > 0 {
-			result.Add(item)
-		}
-	}
-
-	return &result
-}
-
-// Elem returns items in the list that are direct element assignments
-// (key length == 0). This should be used with Filter to get at elements.
-func (o *ObjectList) Elem() *ObjectList {
-	var result ObjectList
-	for _, item := range o.Items {
-		if len(item.Keys) == 0 {
-			result.Add(item)
-		}
-	}
-
-	return &result
-}
-
-func (o *ObjectList) Pos() token.Pos {
-	// returns the position of the first item
-	return o.Items[0].Pos()
-}
-
-// ObjectItem represents an HCL Object Item. An item is represented with a key
-// (or keys). It can be an assignment or an object (both normal and nested)
-type ObjectItem struct {
-	// keys is only one length long if it's of type assignment. If it's a
-	// nested object it can be larger than one.
In that case "assign" is - // invalid as there is no assignments for a nested object. - Keys []*ObjectKey - - // assign contains the position of "=", if any - Assign token.Pos - - // val is the item itself. It can be an object,list, number, bool or a - // string. If key length is larger than one, val can be only of type - // Object. - Val Node - - LeadComment *CommentGroup // associated lead comment - LineComment *CommentGroup // associated line comment -} - -func (o *ObjectItem) Pos() token.Pos { - // I'm not entirely sure what causes this, but removing this causes - // a test failure. We should investigate at some point. - if len(o.Keys) == 0 { - return token.Pos{} - } - - return o.Keys[0].Pos() -} - -// ObjectKeys are either an identifier or of type string. -type ObjectKey struct { - Token token.Token -} - -func (o *ObjectKey) Pos() token.Pos { - return o.Token.Pos -} - -// LiteralType represents a literal of basic type. Valid types are: -// token.NUMBER, token.FLOAT, token.BOOL and token.STRING -type LiteralType struct { - Token token.Token - - // comment types, only used when in a list - LeadComment *CommentGroup - LineComment *CommentGroup -} - -func (l *LiteralType) Pos() token.Pos { - return l.Token.Pos -} - -// ListStatement represents a HCL List type -type ListType struct { - Lbrack token.Pos // position of "[" - Rbrack token.Pos // position of "]" - List []Node // the elements in lexical order -} - -func (l *ListType) Pos() token.Pos { - return l.Lbrack -} - -func (l *ListType) Add(node Node) { - l.List = append(l.List, node) -} - -// ObjectType represents a HCL Object Type -type ObjectType struct { - Lbrace token.Pos // position of "{" - Rbrace token.Pos // position of "}" - List *ObjectList // the nodes in lexical order -} - -func (o *ObjectType) Pos() token.Pos { - return o.Lbrace -} - -// Comment node represents a single //, # style or /*- style commment -type Comment struct { - Start token.Pos // position of / or # - Text string -} - -func (c *Comment) Pos() token.Pos { - return c.Start -} - -// CommentGroup node represents a sequence of comments with no other tokens and -// no empty lines between. -type CommentGroup struct { - List []*Comment // len(List) > 0 -} - -func (c *CommentGroup) Pos() token.Pos { - return c.List[0].Pos() -} - -//------------------------------------------------------------------- -// GoStringer -//------------------------------------------------------------------- - -func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } -func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go deleted file mode 100644 index ba07ad42..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go +++ /dev/null @@ -1,52 +0,0 @@ -package ast - -import "fmt" - -// WalkFunc describes a function to be called for each node during a Walk. The -// returned node can be used to rewrite the AST. Walking stops the returned -// bool is false. -type WalkFunc func(Node) (Node, bool) - -// Walk traverses an AST in depth-first order: It starts by calling fn(node); -// node must not be nil. If fn returns true, Walk invokes fn recursively for -// each of the non-nil children of node, followed by a call of fn(nil). The -// returned node of fn can be used to rewrite the passed node to fn. 
-func Walk(node Node, fn WalkFunc) Node { - rewritten, ok := fn(node) - if !ok { - return rewritten - } - - switch n := node.(type) { - case *File: - n.Node = Walk(n.Node, fn) - case *ObjectList: - for i, item := range n.Items { - n.Items[i] = Walk(item, fn).(*ObjectItem) - } - case *ObjectKey: - // nothing to do - case *ObjectItem: - for i, k := range n.Keys { - n.Keys[i] = Walk(k, fn).(*ObjectKey) - } - - if n.Val != nil { - n.Val = Walk(n.Val, fn) - } - case *LiteralType: - // nothing to do - case *ListType: - for i, l := range n.List { - n.List[i] = Walk(l, fn) - } - case *ObjectType: - n.List = Walk(n.List, fn).(*ObjectList) - default: - // should we panic here? - fmt.Printf("unknown type: %T\n", n) - } - - fn(nil) - return rewritten -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go deleted file mode 100644 index 5c99381d..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go +++ /dev/null @@ -1,17 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/token" -) - -// PosError is a parse error that contains a position. -type PosError struct { - Pos token.Pos - Err error -} - -func (e *PosError) Error() string { - return fmt.Sprintf("At %s: %s", e.Pos, e.Err) -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go deleted file mode 100644 index 64c83bcf..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ /dev/null @@ -1,532 +0,0 @@ -// Package parser implements a parser for HCL (HashiCorp Configuration -// Language) -package parser - -import ( - "bytes" - "errors" - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/scanner" - "github.com/hashicorp/hcl/hcl/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - comments []*ast.CommentGroup - leadComment *ast.CommentGroup // last lead comment - lineComment *ast.CommentGroup // last line comment - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - // normalize all line endings - // since the scanner and output only work with "\n" line endings, we may - // end up with dangling "\r" characters in the parsed data. - src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) - - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = &PosError{Pos: pos, Err: errors.New(msg)} - } - - f.Node, err = p.objectList(false) - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - f.Comments = p.comments - return f, nil -} - -// objectList parses a list of items within an object (generally k/v pairs). -// The parameter" obj" tells this whether to we are within an object (braces: -// '{', '}') or just at the top level. If we're within an object, we end -// at an RBRACE. 
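
Illustrative aside, not part of the patch: parser.Parse above, ObjectList.Filter from ast.go, and ast.Walk compose like this, assuming the vendored import paths; the input and labels are hypothetical:

	package main

	import (
		"fmt"
		"log"

		"github.com/hashicorp/hcl/hcl/ast"
		"github.com/hashicorp/hcl/hcl/parser"
	)

	func main() {
		src := []byte(`
	service "web" { port = 8080 }
	service "db"  { port = 5432 }
	`)
		f, err := parser.Parse(src)
		if err != nil {
			log.Fatal(err)
		}

		// Filter strips the matched prefix, so each remaining key is a label.
		services := f.Node.(*ast.ObjectList).Filter("service")
		for _, item := range services.Items {
			fmt.Println("service:", item.Keys[0].Token.Value())
		}

		// Walk visits nodes depth-first; returning (n, true) traverses
		// without rewriting anything.
		ast.Walk(f.Node, func(n ast.Node) (ast.Node, bool) {
			if lit, ok := n.(*ast.LiteralType); ok {
				fmt.Println("literal:", lit.Token.Text)
			}
			return n, true
		})
	}
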
-func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - if obj { - tok := p.scan() - p.unscan() - if tok.Type == token.RBRACE { - break - } - } - - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // object lists can be optionally comma-delimited e.g. when a list of maps - // is being expressed, so a comma is allowed here - it's simply consumed - tok := p.scan() - if tok.Type != token.COMMA { - p.unscan() - } - } - return node, nil -} - -func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { - endline = p.tok.Pos.Line - - // count the endline if it's multiline comment, ie starting with /* - if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { - // don't use range here - no need to decode Unicode code points - for i := 0; i < len(p.tok.Text); i++ { - if p.tok.Text[i] == '\n' { - endline++ - } - } - } - - comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} - p.tok = p.sc.Scan() - return -} - -func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { - var list []*ast.Comment - endline = p.tok.Pos.Line - - for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { - var comment *ast.Comment - comment, endline = p.consumeComment() - list = append(list, comment) - } - - // add comment group to the comments list - comments = &ast.CommentGroup{List: list} - p.comments = append(p.comments, comments) - - return -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if len(keys) > 0 && err == errEofToken { - // We ignore eof token here since it is an error if we didn't - // receive a value (but we did receive a key) for the item. - err = nil - } - if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { - // This is a strange boolean statement, but what it means is: - // We have keys with no value, and we're likely in an object - // (since RBrace ends an object). For this, we set err to nil so - // we continue and get the error below of having the wrong value - // type. - err = nil - - // Reset the token type so we don't think it completed fine. See - // objectType which uses p.tok.Type to check if we're done with - // the object. 
- p.tok.Type = token.EOF - } - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - if p.leadComment != nil { - o.LeadComment = p.leadComment - p.leadComment = nil - } - - switch p.tok.Type { - case token.ASSIGN: - o.Assign = p.tok.Pos - o.Val, err = p.object() - if err != nil { - return nil, err - } - case token.LBRACE: - o.Val, err = p.objectType() - if err != nil { - return nil, err - } - default: - keyStr := make([]string, 0, len(keys)) - for _, k := range keys { - keyStr = append(keyStr, k.Token.Text) - } - - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf( - "key '%s' expected start of object ('{') or assignment ('=')", - strings.Join(keyStr, " ")), - } - } - - // key=#comment - // val - if p.lineComment != nil { - o.LineComment, p.lineComment = p.lineComment, nil - } - - // do a look-ahead for line comment - p.scan() - if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { - o.LineComment = p.lineComment - p.lineComment = nil - } - p.unscan() - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - // It is very important to also return the keys here as well as - // the error. This is because we need to be able to tell if we - // did parse keys prior to finding the EOF, or if we just found - // a bare EOF. - return keys, errEofToken - case token.ASSIGN: - // assignment or object only, but not nested objects. this is not - // allowed: `foo bar = {}` - if keyCount > 1 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), - } - } - - if keyCount == 0 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: errors.New("no object keys found!"), - } - } - - return keys, nil - case token.LBRACE: - var err error - - // If we have no keys, then it is a syntax error. i.e. {{}} is not - // allowed. - if len(keys) == 0 { - err = &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), - } - } - - // object - return keys, err - case token.IDENT, token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{Token: p.tok}) - case token.ILLEGAL: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("illegal character"), - } - default: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), - } - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. 
-func (p *Parser) object() (ast.Node, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.COMMENT: - // implement comment - case token.EOF: - return nil, errEofToken - } - - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("Unknown token: %+v", tok), - } -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{ - Lbrace: p.tok.Pos, - } - - l, err := p.objectList(true) - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - // No error, scan and expect the ending to be a brace - if tok := p.scan(); tok.Type != token.RBRACE { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), - } - } - - o.List = l - o.Rbrace = p.tok.Pos // advanced via parseObjectList - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{ - Lbrack: p.tok.Pos, - } - - needComma := false - for { - tok := p.scan() - if needComma { - switch tok.Type { - case token.COMMA, token.RBRACK: - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error parsing list, expected comma or list end, got: %s", - tok.Type), - } - } - } - switch tok.Type { - case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: - node, err := p.literalType() - if err != nil { - return nil, err - } - - // If there is a lead comment, apply it - if p.leadComment != nil { - node.LeadComment = p.leadComment - p.leadComment = nil - } - - l.Add(node) - needComma = true - case token.COMMA: - // get next list item or we are at the end - // do a look-ahead for line comment - p.scan() - if p.lineComment != nil && len(l.List) > 0 { - lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) - if ok { - lit.LineComment = p.lineComment - l.List[len(l.List)-1] = lit - p.lineComment = nil - } - } - p.unscan() - - needComma = false - continue - case token.LBRACE: - // Looks like a nested object, so parse it out - node, err := p.objectType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse object within list: %s", err), - } - } - l.Add(node) - needComma = true - case token.LBRACK: - node, err := p.listType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse list within list: %s", err), - } - } - l.Add(node) - case token.RBRACK: - // finished - l.Rbrack = p.tok.Pos - return l, nil - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), - } - } - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok, - }, nil -} - -// scan returns the next token from the 
underlying scanner. If a token has -// been unscanned then read that instead. In the process, it collects any -// comment groups encountered, and remembers the last lead and line comments. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - // Otherwise read the next token from the scanner and Save it to the buffer - // in case we unscan later. - prev := p.tok - p.tok = p.sc.Scan() - - if p.tok.Type == token.COMMENT { - var comment *ast.CommentGroup - var endline int - - // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", - // p.tok.Pos.Line, prev.Pos.Line, endline) - if p.tok.Pos.Line == prev.Pos.Line { - // The comment is on same line as the previous token; it - // cannot be a lead comment but may be a line comment. - comment, endline = p.consumeCommentGroup(0) - if p.tok.Pos.Line != endline { - // The next token is on a different line, thus - // the last comment group is a line comment. - p.lineComment = comment - } - } - - // consume successor comments, if any - endline = -1 - for p.tok.Type == token.COMMENT { - comment, endline = p.consumeCommentGroup(1) - } - - if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { - switch p.tok.Type { - case token.RBRACE, token.RBRACK: - // Do not count for these cases - default: - // The next token is following on the line immediately after the - // comment group, thus the last comment group is a lead comment. - p.leadComment = comment - } - } - - } - - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go deleted file mode 100644 index 624a18fe..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ /dev/null @@ -1,652 +0,0 @@ -// Package scanner implements a scanner for HCL (HashiCorp Configuration -// Language) source text. -package scanner - -import ( - "bytes" - "fmt" - "os" - "regexp" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/hcl/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. 
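
A brief illustrative aside on error reporting (hypothetical input, not patch content): syntax errors from this parser surface as *parser.PosError values carrying the token.Pos defined in error.go above, which callers can unpack for diagnostics:

	package main

	import (
		"fmt"

		"github.com/hashicorp/hcl/hcl/parser"
	)

	func main() {
		// An identifier followed by a bare value (no '=' or '{') is a syntax error.
		_, err := parser.Parse([]byte(`port 8080`))
		if posErr, ok := err.(*parser.PosError); ok {
			fmt.Printf("parse failed at %d:%d: %v\n",
				posErr.Pos.Line, posErr.Pos.Column, posErr.Err)
		}
	}
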
If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. - b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == utf8.RuneError && size == 1 { - s.err("illegal UTF-8 encoding") - return ch - } - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - if ch == '\x00' { - s.err("unexpected null character (0x00)") - return eof - } - - if ch == '\uE123' { - s.err("unicode code point U+E123 reserved for internal use") - return utf8.RuneError - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. 
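
For orientation while reading the Scan method that follows, an illustrative token loop over this scanner (assuming the vendored hcl/scanner and hcl/token import paths; the input is hypothetical):

	package main

	import (
		"fmt"

		"github.com/hashicorp/hcl/hcl/scanner"
		"github.com/hashicorp/hcl/hcl/token"
	)

	func main() {
		s := scanner.New([]byte(`port = 8080 // listener`))

		// Scan keeps returning tokens (including COMMENT tokens at this
		// layer) until the source is exhausted and token.EOF appears.
		for {
			tok := s.Scan()
			if tok.Type == token.EOF {
				break
			}
			fmt.Printf("%s %s %q\n", tok.Pos, tok.Type, tok.Text)
		}
	}
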
-func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - tok = token.IDENT - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '#', '/': - tok = token.COMMENT - s.scanComment(ch) - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '<': - tok = token.HEREDOC - s.scanHeredoc() - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case '=': - tok = token.ASSIGN - case '+': - tok = token.ADD - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - tok = token.SUB - } - default: - s.err("illegal char") - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -func (s *Scanner) scanComment(ch rune) { - // single line comments - if ch == '#' || (ch == '/' && s.peek() != '*') { - if ch == '/' && s.peek() != '/' { - s.err("expected '/' for comment") - return - } - - ch = s.next() - for ch != '\n' && ch >= 0 && ch != eof { - ch = s.next() - } - if ch != eof && ch >= 0 { - s.unread() - } - return - } - - // be sure we get the character after /* This allows us to find comment's - // that are not erminated - if ch == '/' { - s.next() - ch = s.next() // read character after "/*" - } - - // look for /* - style comments - for { - if ch < 0 || ch == eof { - s.err("comment not terminated") - break - } - - ch0 := ch - ch = s.next() - if ch0 == '*' && ch == '/' { - break - } - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - if ch == '0' { - // check for hexadecimal, octal or float - ch = s.next() - if ch == 'x' || ch == 'X' { - // hexadecimal - ch = s.next() - found := false - for isHexadecimal(ch) { - ch = s.next() - found = true - } - - if !found { - s.err("illegal hexadecimal number") - } - - if ch != eof { - s.unread() - } - - return token.NUMBER - } - - // now it's either something like: 0421(octal) or 0.1231(float) - illegalOctal := false - for isDecimal(ch) { - ch = s.next() - if ch == '8' || ch == '9' { - // this is just a possibility. For example 0159 is illegal, but - // 0159.23 is valid. So we mark a possible illegal octal. 
If - // the next character is not a period, we'll print the error. - illegalOctal = true - } - } - - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if illegalOctal { - s.err("illegal octal number") - } - - if ch != eof { - s.unread() - } - return token.NUMBER - } - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - return token.NUMBER -} - -// scanMantissa scans the mantissa beginning from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. -func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanHeredoc scans a heredoc string -func (s *Scanner) scanHeredoc() { - // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { - break - } - - // Not an anchor match, record the start of a new line - lineStart = s.srcPos.Offset - } - - if ch == eof { - s.err("heredoc not terminated") - return - } - } - - return -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' && braces == 0 { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. 
For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - start := n - for n > 0 && digitVal(ch) < base { - ch = s.next() - if ch == eof { - // If we see an EOF, we halt any more scanning of digits - // immediately. - break - } - - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - if n != start && ch != eof { - // we scanned all digits, put the last non digit char back, - // only if we read anything at all - s.unread() - } - - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. -func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isDigit returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isDecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go deleted file mode 100644 index 5f981eaa..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go +++ /dev/null @@ -1,241 +0,0 @@ -package strconv - -import ( - "errors" - "unicode/utf8" -) - -// ErrSyntax indicates that a value does 
not have the right syntax for the target type. -var ErrSyntax = errors.New("invalid syntax") - -// Unquote interprets s as a single-quoted, double-quoted, -// or backquoted Go string literal, returning the string value -// that s quotes. (If s is single-quoted, it would be a Go -// character literal; Unquote returns the corresponding -// one-character string.) -func Unquote(s string) (t string, err error) { - n := len(s) - if n < 2 { - return "", ErrSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", ErrSyntax - } - s = s[1 : n-1] - - if quote != '"' { - return "", ErrSyntax - } - if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { - return "", ErrSyntax - } - - // Is it trivial? Avoid allocation. - if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { - switch quote { - case '"': - return s, nil - case '\'': - r, size := utf8.DecodeRuneInString(s) - if size == len(s) && (r != utf8.RuneError || size != 1) { - return s, nil - } - } - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - // If we're starting a '${}' then let it through un-unquoted. - // Specifically: we don't unquote any characters within the `${}` - // section. - if s[0] == '$' && len(s) > 1 && s[1] == '{' { - buf = append(buf, '$', '{') - s = s[2:] - - // Continue reading until we find the closing brace, copying as-is - braces := 1 - for len(s) > 0 && braces > 0 { - r, size := utf8.DecodeRuneInString(s) - if r == utf8.RuneError { - return "", ErrSyntax - } - - s = s[size:] - - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - - switch r { - case '{': - braces++ - case '}': - braces-- - } - } - if braces != 0 { - return "", ErrSyntax - } - if len(s) == 0 { - // If there's no string left, we're done! - break - } else { - // If there's more left, we need to pop back up to the top of the loop - // in case there's another interpolation in this string. - continue - } - } - - if s[0] == '\n' { - return "", ErrSyntax - } - - c, multibyte, ss, err := unquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - if quote == '\'' && len(s) != 0 { - // single-quoted must be single character - return "", ErrSyntax - } - } - return string(buf), nil -} - -// contains reports whether the string contains the byte c. 
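
Illustrative aside (not patch content): unlike the standard library's strconv.Unquote, the Unquote above passes `${...}` interpolation bodies through untouched while still decoding ordinary escapes:

	package main

	import (
		"fmt"
		"log"

		hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
	)

	func main() {
		s, err := hclstrconv.Unquote(`"name-${var.env}\n"`)
		if err != nil {
			log.Fatal(err)
		}
		// The \n outside the interpolation is decoded; ${var.env} is copied
		// through verbatim rather than being treated as escapes.
		fmt.Printf("%q\n", s)
	}
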
-func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} - -func unhex(b byte) (v rune, ok bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return -} - -func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { - // easy cases - switch c := s[0]; { - case c == quote && (quote == '\'' || quote == '"'): - err = ErrSyntax - return - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // hard case: c is backslash - if len(s) <= 1 { - err = ErrSyntax - return - } - c := s[1] - s = s[2:] - - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case 'x', 'u', 'U': - n := 0 - switch c { - case 'x': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = ErrSyntax - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = ErrSyntax - return - } - v = v<<4 | x - } - s = s[n:] - if c == 'x' { - // single-byte string, possibly not UTF-8 - value = v - break - } - if v > utf8.MaxRune { - err = ErrSyntax - return - } - value = v - multibyte = true - case '0', '1', '2', '3', '4', '5', '6', '7': - v := rune(c) - '0' - if len(s) < 2 { - err = ErrSyntax - return - } - for j := 0; j < 2; j++ { // one digit already; two more - x := rune(s[j]) - '0' - if x < 0 || x > 7 { - err = ErrSyntax - return - } - v = (v << 3) | x - } - s = s[2:] - if v > 255 { - err = ErrSyntax - return - } - value = v - case '\\': - value = '\\' - case '\'', '"': - if c != quote { - err = ErrSyntax - return - } - value = rune(c) - default: - err = ErrSyntax - return - } - tail = s - return -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go deleted file mode 100644 index 59c1bb72..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. 
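
A small illustrative aside on the position type above (hypothetical values, not patch content): Before and the After method just below order positions by offset, falling back to line, and String renders the usual file:line:column form:

	package main

	import (
		"fmt"

		"github.com/hashicorp/hcl/hcl/token"
	)

	func main() {
		a := token.Pos{Filename: "main.hcl", Offset: 0, Line: 1, Column: 1}
		b := token.Pos{Filename: "main.hcl", Offset: 12, Line: 2, Column: 3}

		fmt.Println(a)           // main.hcl:1:1
		fmt.Println(a.Before(b)) // true
		fmt.Println(b.After(a))  // true
	}
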
-func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go deleted file mode 100644 index e37c0664..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/token.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package token defines constants representing the lexical tokens for HCL -// (HashiCorp Configuration Language) -package token - -import ( - "fmt" - "strconv" - "strings" - - hclstrconv "github.com/hashicorp/hcl/hcl/strconv" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string - JSON bool -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - COMMENT - - identifier_beg - IDENT // literals - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - HEREDOC // < 0 { - // Pop the current item - n := len(frontier) - item := frontier[n-1] - frontier = frontier[:n-1] - - switch v := item.Val.(type) { - case *ast.ObjectType: - items, frontier = flattenObjectType(v, item, items, frontier) - case *ast.ListType: - items, frontier = flattenListType(v, item, items, frontier) - default: - items = append(items, item) - } - } - - // Reverse the list since the frontier model runs things backwards - for i := len(items)/2 - 1; i >= 0; i-- { - opp := len(items) - 1 - i - items[i], items[opp] = items[opp], items[i] - } - - // Done! Set the original items - list.Items = items - return n, true - }) -} - -func flattenListType( - ot *ast.ListType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list is empty, keep the original list - if len(ot.List) == 0 { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List { - if _, ok := subitem.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, elem := range ot.List { - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: item.Keys, - Assign: item.Assign, - Val: elem, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} - -func flattenObjectType( - ot *ast.ObjectType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list has no items we do not have to flatten anything - if ot.List.Items == nil { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List.Items { - if _, ok := subitem.Val.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! 
We have a match go through all the items and flatten - for _, subitem := range ot.List.Items { - // Copy the new key - keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) - copy(keys, item.Keys) - copy(keys[len(item.Keys):], subitem.Keys) - - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: keys, - Assign: item.Assign, - Val: subitem.Val, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go deleted file mode 100644 index 125a5f07..00000000 --- a/vendor/github.com/hashicorp/hcl/json/parser/parser.go +++ /dev/null @@ -1,313 +0,0 @@ -package parser - -import ( - "errors" - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hcltoken "github.com/hashicorp/hcl/hcl/token" - "github.com/hashicorp/hcl/json/scanner" - "github.com/hashicorp/hcl/json/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = fmt.Errorf("%s: %s", pos, msg) - } - - // The root must be an object in JSON - object, err := p.object() - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - // We make our final node an object list so it is more HCL compatible - f.Node = object.List - - // Flatten it, which finds patterns and turns them into more HCL-like - // AST trees. - flattenObjects(f.Node) - - return f, nil -} - -func (p *Parser) objectList() (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // Check for a followup comma. 
If it isn't a comma, then we're done - if tok := p.scan(); tok.Type != token.COMMA { - break - } - } - - return node, nil -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - switch p.tok.Type { - case token.COLON: - pos := p.tok.Pos - o.Assign = hcltoken.Pos{ - Filename: pos.Filename, - Offset: pos.Offset, - Line: pos.Line, - Column: pos.Column, - } - - o.Val, err = p.objectValue() - if err != nil { - return nil, err - } - } - - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - return nil, errEofToken - case token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{ - Token: p.tok.HCLToken(), - }) - case token.COLON: - // If we have a zero keycount it means that we never got - // an object key, i.e. `{ :`. This is a syntax error. - if keyCount == 0 { - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - - // Done - return keys, nil - case token.ILLEGAL: - return nil, errors.New("illegal") - default: - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) objectValue() (ast.Node, error) { - defer un(trace(p, "ParseObjectValue")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) object() (*ast.ObjectType, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.LBRACE: - return p.objectType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{} - - l, err := p.objectList() - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - o.List = l - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{} - - for { - tok := p.scan() - switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING: - node, err := p.literalType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.COMMA: - continue - case token.LBRACE: - node, err := p.objectType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.BOOL: - // TODO(arslan) should we support? 
not supported by HCL yet - case token.LBRACK: - // TODO(arslan) should we support nested lists? Even though it's - // written in README of HCL, it's not a part of the grammar - // (not defined in parse.y) - case token.RBRACK: - // finished - return l, nil - default: - return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) - } - - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok.HCLToken(), - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - p.tok = p.sc.Scan() - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go deleted file mode 100644 index fe3f0f09..00000000 --- a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go +++ /dev/null @@ -1,451 +0,0 @@ -package scanner - -import ( - "bytes" - "fmt" - "os" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/json/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. 
- b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. -func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } else if lit == "null" { - tok = token.NULL - } else { - s.err("illegal char") - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case ':': - tok = token.COLON - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - s.err("illegal char") - } - default: - s.err("illegal char: " + string(ch)) - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -// scanNumber scans a HCL number definition starting with the given 
rune
-func (s *Scanner) scanNumber(ch rune) token.Type {
-	zero := ch == '0'
-	pos := s.srcPos
-
-	s.scanMantissa(ch)
-	ch = s.next() // seek forward
-	if ch == 'e' || ch == 'E' {
-		ch = s.scanExponent(ch)
-		return token.FLOAT
-	}
-
-	if ch == '.' {
-		ch = s.scanFraction(ch)
-		if ch == 'e' || ch == 'E' {
-			ch = s.next()
-			ch = s.scanExponent(ch)
-		}
-		return token.FLOAT
-	}
-
-	if ch != eof {
-		s.unread()
-	}
-
-	// If the literal has more than one digit and started with '0', error
-	if zero && pos != s.srcPos {
-		s.err("numbers cannot start with 0")
-	}
-
-	return token.NUMBER
-}
-
-// scanMantissa scans the mantissa beginning from the rune. It returns the next
-// non-decimal rune. It's used to determine whether it's a fraction or exponent.
-func (s *Scanner) scanMantissa(ch rune) rune {
-	scanned := false
-	for isDecimal(ch) {
-		ch = s.next()
-		scanned = true
-	}
-
-	if scanned && ch != eof {
-		s.unread()
-	}
-	return ch
-}
-
-// scanFraction scans the fraction after the '.' rune
-func (s *Scanner) scanFraction(ch rune) rune {
-	if ch == '.' {
-		ch = s.peek() // we peek just to see if we can move forward
-		ch = s.scanMantissa(ch)
-	}
-	return ch
-}
-
-// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
-// rune.
-func (s *Scanner) scanExponent(ch rune) rune {
-	if ch == 'e' || ch == 'E' {
-		ch = s.next()
-		if ch == '-' || ch == '+' {
-			ch = s.next()
-		}
-		ch = s.scanMantissa(ch)
-	}
-	return ch
-}
-
-// scanString scans a quoted string
-func (s *Scanner) scanString() {
-	braces := 0
-	for {
-		// '"' opening already consumed
-		// read character after quote
-		ch := s.next()
-
-		if ch == '\n' || ch < 0 || ch == eof {
-			s.err("literal not terminated")
-			return
-		}
-
-		if ch == '"' {
-			break
-		}
-
-		// If we're going into a ${} then we can ignore quotes for a while
-		if braces == 0 && ch == '$' && s.peek() == '{' {
-			braces++
-			s.next()
-		} else if braces > 0 && ch == '{' {
-			braces++
-		}
-		if braces > 0 && ch == '}' {
-			braces--
-		}
-
-		if ch == '\\' {
-			s.scanEscape()
-		}
-	}
-
-	return
-}
-
-// scanEscape scans an escape sequence
-func (s *Scanner) scanEscape() rune {
-	// http://en.cppreference.com/w/cpp/language/escape
-	ch := s.next() // read character after '\\'
-	switch ch {
-	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
-		// nothing to do
-	case '0', '1', '2', '3', '4', '5', '6', '7':
-		// octal notation
-		ch = s.scanDigits(ch, 8, 3)
-	case 'x':
-		// hexadecimal notation
-		ch = s.scanDigits(s.next(), 16, 2)
-	case 'u':
-		// universal character name
-		ch = s.scanDigits(s.next(), 16, 4)
-	case 'U':
-		// universal character name
-		ch = s.scanDigits(s.next(), 16, 8)
-	default:
-		s.err("illegal char escape")
-	}
-	return ch
-}
-
-// scanDigits scans a rune with the given base for n times. For example, an
-// octal escape \184 would result in a call scanDigits(ch, 8, 3)
-func (s *Scanner) scanDigits(ch rune, base, n int) rune {
-	for n > 0 && digitVal(ch) < base {
-		ch = s.next()
-		n--
-	}
-	if n > 0 {
-		s.err("illegal char escape")
-	}
-
-	// we scanned all digits, put the last non-digit char back
-	s.unread()
-	return ch
-}
-
-// scanIdentifier scans an identifier and returns the literal string
-func (s *Scanner) scanIdentifier() string {
-	offs := s.srcPos.Offset - s.lastCharLen
-	ch := s.next()
-	for isLetter(ch) || isDigit(ch) || ch == '-' {
-		ch = s.next()
-	}
-
-	if ch != eof {
-		s.unread() // we got the identifier, put back the latest char
-	}
-
-	return string(s.src[offs:s.srcPos.Offset])
-}
-
-// recentPosition returns the position of the character immediately after the
-// character or token returned by the last call to Scan.
-func (s *Scanner) recentPosition() (pos token.Pos) {
-	pos.Offset = s.srcPos.Offset - s.lastCharLen
-	switch {
-	case s.srcPos.Column > 0:
-		// common case: last character was not a '\n'
-		pos.Line = s.srcPos.Line
-		pos.Column = s.srcPos.Column
-	case s.lastLineLen > 0:
-		// last character was a '\n'
-		// (we cannot be at the beginning of the source
-		// since we have called next() at least once)
-		pos.Line = s.srcPos.Line - 1
-		pos.Column = s.lastLineLen
-	default:
-		// at the beginning of the source
-		pos.Line = 1
-		pos.Column = 1
-	}
-	return
-}
-
-// err reports a scanning error to the s.Error function. If no Error function
-// is set, it prints the message to os.Stderr by default.
-func (s *Scanner) err(msg string) {
-	s.ErrorCount++
-	pos := s.recentPosition()
-
-	if s.Error != nil {
-		s.Error(pos, msg)
-		return
-	}
-
-	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
-}
-
-// isLetter returns true if the given rune is a letter
-func isLetter(ch rune) bool {
-	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
-}
-
-// isDigit returns true if the given rune is a decimal digit
-func isDigit(ch rune) bool {
-	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
-}
-
-// isDecimal returns true if the given rune is an ASCII decimal digit
-func isDecimal(ch rune) bool {
-	return '0' <= ch && ch <= '9'
-}
-
-// isHexadecimal returns true if the given rune is a hexadecimal digit
-func isHexadecimal(ch rune) bool {
-	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
-}
-
-// isWhitespace returns true if the rune is a space, tab, newline or carriage return
-func isWhitespace(ch rune) bool {
-	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
-}
-
-// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
-func digitVal(ch rune) int {
-	switch {
-	case '0' <= ch && ch <= '9':
-		return int(ch - '0')
-	case 'a' <= ch && ch <= 'f':
-		return int(ch - 'a' + 10)
-	case 'A' <= ch && ch <= 'F':
-		return int(ch - 'A' + 10)
-	}
-	return 16 // larger than any legal digit val
-}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
deleted file mode 100644
index 59c1bb72..00000000
--- a/vendor/github.com/hashicorp/hcl/json/token/position.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package token
-
-import "fmt"
-
-// Pos describes an arbitrary source position
-// including the file, line, and column location.
-// A position is valid if the line number is > 0.
-type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. -func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go deleted file mode 100644 index 95a0c3ee..00000000 --- a/vendor/github.com/hashicorp/hcl/json/token/token.go +++ /dev/null @@ -1,118 +0,0 @@ -package token - -import ( - "fmt" - "strconv" - - hcltoken "github.com/hashicorp/hcl/hcl/token" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - - identifier_beg - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - NULL // null - literal_end - identifier_end - - operator_beg - LBRACK // [ - LBRACE // { - COMMA // , - PERIOD // . - COLON // : - - RBRACK // ] - RBRACE // } - - operator_end -) - -var tokens = [...]string{ - ILLEGAL: "ILLEGAL", - - EOF: "EOF", - - NUMBER: "NUMBER", - FLOAT: "FLOAT", - BOOL: "BOOL", - STRING: "STRING", - NULL: "NULL", - - LBRACK: "LBRACK", - LBRACE: "LBRACE", - COMMA: "COMMA", - PERIOD: "PERIOD", - COLON: "COLON", - - RBRACK: "RBRACK", - RBRACE: "RBRACE", -} - -// String returns the string corresponding to the token tok. -func (t Type) String() string { - s := "" - if 0 <= t && t < Type(len(tokens)) { - s = tokens[t] - } - if s == "" { - s = "token(" + strconv.Itoa(int(t)) + ")" - } - return s -} - -// IsIdentifier returns true for tokens corresponding to identifiers and basic -// type literals; it returns false otherwise. -func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } - -// IsLiteral returns true for tokens corresponding to basic type literals; it -// returns false otherwise. -func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } - -// IsOperator returns true for tokens corresponding to operators and -// delimiters; it returns false otherwise. -func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } - -// String returns the token's literal text. Note that this is only -// applicable for certain token types, such as token.IDENT, -// token.STRING, etc.. -func (t Token) String() string { - return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) -} - -// HCLToken converts this token to an HCL token. -// -// The token type must be a literal type or this will panic. 
-func (t Token) HCLToken() hcltoken.Token { - switch t.Type { - case BOOL: - return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} - case FLOAT: - return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} - case NULL: - return hcltoken.Token{Type: hcltoken.STRING, Text: ""} - case NUMBER: - return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} - case STRING: - return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} - default: - panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) - } -} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go deleted file mode 100644 index d9993c29..00000000 --- a/vendor/github.com/hashicorp/hcl/lex.go +++ /dev/null @@ -1,38 +0,0 @@ -package hcl - -import ( - "unicode" - "unicode/utf8" -) - -type lexModeValue byte - -const ( - lexModeUnknown lexModeValue = iota - lexModeHcl - lexModeJson -) - -// lexMode returns whether we're going to be parsing in JSON -// mode or HCL mode. -func lexMode(v []byte) lexModeValue { - var ( - r rune - w int - offset int - ) - - for { - r, w = utf8.DecodeRune(v[offset:]) - offset += w - if unicode.IsSpace(r) { - continue - } - if r == '{' { - return lexModeJson - } - break - } - - return lexModeHcl -} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go deleted file mode 100644 index 1fca53c4..00000000 --- a/vendor/github.com/hashicorp/hcl/parse.go +++ /dev/null @@ -1,39 +0,0 @@ -package hcl - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hclParser "github.com/hashicorp/hcl/hcl/parser" - jsonParser "github.com/hashicorp/hcl/json/parser" -) - -// ParseBytes accepts as input byte slice and returns ast tree. -// -// Input can be either JSON or HCL -func ParseBytes(in []byte) (*ast.File, error) { - return parse(in) -} - -// ParseString accepts input as a string and returns ast tree. -func ParseString(input string) (*ast.File, error) { - return parse([]byte(input)) -} - -func parse(in []byte) (*ast.File, error) { - switch lexMode(in) { - case lexModeHcl: - return hclParser.Parse(in) - case lexModeJson: - return jsonParser.Parse(in) - } - - return nil, fmt.Errorf("unknown config format") -} - -// Parse parses the given input and returns the root object. -// -// The input format can be either HCL or JSON. -func Parse(input string) (*ast.File, error) { - return parse([]byte(input)) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md deleted file mode 100644 index f59ce92e..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md +++ /dev/null @@ -1,184 +0,0 @@ -# HCL Dynamic Blocks Extension - -This HCL extension implements a special block type named "dynamic" that can -be used to dynamically generate blocks of other types by iterating over -collection values. - -Normally the block structure in an HCL configuration file is rigid, even -though dynamic expressions can be used within attribute values. This is -convenient for most applications since it allows the overall structure of -the document to be decoded easily, but in some applications it is desirable -to allow dynamic block generation within certain portions of the configuration. 
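In application code the extension is opt-in: the application wraps a parsed body with `Expand` before decoding it, as described further under Usage below. A minimal sketch of that wiring, assuming the vendored `hclparse` and `dynblock` packages; the file name, schema, and nil `EvalContext` here are illustrative, not part of the vendored code:

```go
package main

import (
	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/dynblock"
	"github.com/hashicorp/hcl/v2/hclparse"
)

// decodeToplevel parses src and returns the top-level content with any
// "dynamic" blocks already expanded into ordinary blocks.
func decodeToplevel(src []byte) (*hcl.BodyContent, hcl.Diagnostics) {
	f, diags := hclparse.NewParser().ParseHCL(src, "config.hcl")
	if diags.HasErrors() {
		return nil, diags
	}

	// Wrap the body; expansion happens lazily inside Content.
	// A non-nil EvalContext is needed when for_each/labels reference
	// variables or functions; nil suffices for literal collections.
	body := dynblock.Expand(f.Body, nil)

	schema := &hcl.BodySchema{
		Blocks: []hcl.BlockHeaderSchema{{Type: "toplevel"}},
	}
	content, moreDiags := body.Content(schema)
	return content, append(diags, moreDiags...)
}
```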
- -Dynamic block generation is performed using the `dynamic` block type: - -```hcl -toplevel { - nested { - foo = "static block 1" - } - - dynamic "nested" { - for_each = ["a", "b", "c"] - iterator = nested - content { - foo = "dynamic block ${nested.value}" - } - } - - nested { - foo = "static block 2" - } -} -``` - -The above is interpreted as if it were written as follows: - -```hcl -toplevel { - nested { - foo = "static block 1" - } - - nested { - foo = "dynamic block a" - } - - nested { - foo = "dynamic block b" - } - - nested { - foo = "dynamic block c" - } - - nested { - foo = "static block 2" - } -} -``` - -Since HCL block syntax is not normally exposed to the possibility of unknown -values, this extension must make some compromises when asked to iterate over -an unknown collection. If the length of the collection cannot be statically -recognized (because it is an unknown value of list, map, or set type) then -the `dynamic` construct will generate a _single_ dynamic block whose iterator -key and value are both unknown values of the dynamic pseudo-type, thus causing -any attribute values derived from iteration to appear as unknown values. There -is no explicit representation of the fact that the length of the collection may -eventually be different than one. - -## Usage - -Pass a body to function `Expand` to obtain a new body that will, on access -to its content, evaluate and expand any nested `dynamic` blocks. -Dynamic block processing is also automatically propagated into any nested -blocks that are returned, allowing users to nest dynamic blocks inside -one another and to nest dynamic blocks inside other static blocks. - -HCL structural decoding does not normally have access to an `EvalContext`, so -any variables and functions that should be available to the `for_each` -and `labels` expressions must be passed in when calling `Expand`. Expressions -within the `content` block are evaluated separately and so can be passed a -separate `EvalContext` if desired, during normal attribute expression -evaluation. - -## Detecting Variables - -Some applications dynamically generate an `EvalContext` by analyzing which -variables are referenced by an expression before evaluating it. - -This unfortunately requires some extra effort when this analysis is required -for the context passed to `Expand`: the HCL API requires a schema to be -provided in order to do any analysis of the blocks in a body, but the low-level -schema model provides a description of only one level of nested blocks at -a time, and thus a new schema must be provided for each additional level of -nesting. - -To make this arduous process as convenient as possible, this package provides -a helper function `WalkForEachVariables`, which returns a `WalkVariablesNode` -instance that can be used to find variables directly in a given body and also -determine which nested blocks require recursive calls. Using this mechanism -requires that the caller be able to look up a schema given a nested block type. 
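The vendored source exposes this walk as `WalkVariables` (all variables) and `WalkExpandVariables` (only those needed by `Expand` itself), each returning a `WalkVariablesNode`. Since the example that follows elides the root call and the recursion's return value, here is a compact hedged sketch of the whole pattern, with `schemaFor` standing in for whatever block-type-to-schema lookup the application already has (an illustrative name):

```go
// collectVariables walks a body level by level, accumulating every
// traversal it references while accounting for "dynamic" blocks.
func collectVariables(node dynblock.WalkVariablesNode, schema *hcl.BodySchema,
	schemaFor func(blockType string) *hcl.BodySchema) []hcl.Traversal {

	vars, children := node.Visit(schema)
	for _, child := range children {
		childSchema := schemaFor(child.BlockTypeName)
		vars = append(vars, collectVariables(child.Node, childSchema, schemaFor)...)
	}
	return vars
}
```

The root call would then be `collectVariables(dynblock.WalkVariables(body), rootSchema, schemaFor)`.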
-For _simple_ formats where a specific block type name always has the same schema -regardless of context, a walk can be implemented as follows: - -```go -func walkVariables(node dynblock.WalkVariablesNode, schema *hcl.BodySchema) []hcl.Traversal { - vars, children := node.Visit(schema) - - for _, child := range children { - var childSchema *hcl.BodySchema - switch child.BlockTypeName { - case "a": - childSchema = &hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "b", - LabelNames: []string{"key"}, - }, - }, - } - case "b": - childSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "val", - Required: true, - }, - }, - } - default: - // Should never happen, because the above cases should be exhaustive - // for the application's configuration format. - panic(fmt.Errorf("can't find schema for unknown block type %q", child.BlockTypeName)) - } - - vars = append(vars, testWalkAndAccumVars(child.Node, childSchema)...) - } -} -``` - -### Detecting Variables with `hcldec` Specifications - -For applications that use the higher-level `hcldec` package to decode nested -configuration structures into `cty` values, the same specification can be used -to automatically drive the recursive variable-detection walk described above. - -The helper function `ForEachVariablesHCLDec` allows an entire recursive -configuration structure to be analyzed in a single call given a `hcldec.Spec` -that describes the nested block structure. This means a `hcldec`-based -application can support dynamic blocks with only a little additional effort: - -```go -func decodeBody(body hcl.Body, spec hcldec.Spec) (cty.Value, hcl.Diagnostics) { - // Determine which variables are needed to expand dynamic blocks - neededForDynamic := dynblock.ForEachVariablesHCLDec(body, spec) - - // Build a suitable EvalContext and expand dynamic blocks - dynCtx := buildEvalContext(neededForDynamic) - dynBody := dynblock.Expand(body, dynCtx) - - // Determine which variables are needed to fully decode the expanded body - // This will analyze expressions that came both from static blocks in the - // original body and from blocks that were dynamically added by Expand. - neededForDecode := hcldec.Variables(dynBody, spec) - - // Build a suitable EvalContext and then fully decode the body as per the - // hcldec specification. - decCtx := buildEvalContext(neededForDecode) - return hcldec.Decode(dynBody, spec, decCtx) -} - -func buildEvalContext(needed []hcl.Traversal) *hcl.EvalContext { - // (to be implemented by your application) -} -``` - -# Performance - -This extension is going quite harshly against the grain of the HCL API, and -so it uses lots of wrapping objects and temporary data structures to get its -work done. HCL in general is not suitable for use in high-performance situations -or situations sensitive to memory pressure, but that is _especially_ true for -this extension. diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go deleted file mode 100644 index 0b68a7ae..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go +++ /dev/null @@ -1,248 +0,0 @@ -package dynblock - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// expandBody wraps another hcl.Body and expands any "dynamic" blocks found -// inside whenever Content or PartialContent is called. 
-type expandBody struct { - original hcl.Body - forEachCtx *hcl.EvalContext - iteration *iteration // non-nil if we're nested inside another "dynamic" block - - // These are used with PartialContent to produce a "remaining items" - // body to return. They are nil on all bodies fresh out of the transformer. - // - // Note that this is re-implemented here rather than delegating to the - // existing support required by the underlying body because we need to - // retain access to the entire original body on subsequent decode operations - // so we can retain any "dynamic" blocks for types we didn't take consume - // on the first pass. - hiddenAttrs map[string]struct{} - hiddenBlocks map[string]hcl.BlockHeaderSchema -} - -func (b *expandBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - extSchema := b.extendSchema(schema) - rawContent, diags := b.original.Content(extSchema) - - blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, false) - diags = append(diags, blockDiags...) - attrs := b.prepareAttributes(rawContent.Attributes) - - content := &hcl.BodyContent{ - Attributes: attrs, - Blocks: blocks, - MissingItemRange: b.original.MissingItemRange(), - } - - return content, diags -} - -func (b *expandBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - extSchema := b.extendSchema(schema) - rawContent, _, diags := b.original.PartialContent(extSchema) - // We discard the "remain" argument above because we're going to construct - // our own remain that also takes into account remaining "dynamic" blocks. - - blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, true) - diags = append(diags, blockDiags...) - attrs := b.prepareAttributes(rawContent.Attributes) - - content := &hcl.BodyContent{ - Attributes: attrs, - Blocks: blocks, - MissingItemRange: b.original.MissingItemRange(), - } - - remain := &expandBody{ - original: b.original, - forEachCtx: b.forEachCtx, - iteration: b.iteration, - hiddenAttrs: make(map[string]struct{}), - hiddenBlocks: make(map[string]hcl.BlockHeaderSchema), - } - for name := range b.hiddenAttrs { - remain.hiddenAttrs[name] = struct{}{} - } - for typeName, blockS := range b.hiddenBlocks { - remain.hiddenBlocks[typeName] = blockS - } - for _, attrS := range schema.Attributes { - remain.hiddenAttrs[attrS.Name] = struct{}{} - } - for _, blockS := range schema.Blocks { - remain.hiddenBlocks[blockS.Type] = blockS - } - - return content, remain, diags -} - -func (b *expandBody) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { - // We augment the requested schema to also include our special "dynamic" - // block type, since then we'll get instances of it interleaved with - // all of the literal child blocks we must also include. - extSchema := &hcl.BodySchema{ - Attributes: schema.Attributes, - Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+len(b.hiddenBlocks)+1), - } - copy(extSchema.Blocks, schema.Blocks) - extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) - - // If we have any hiddenBlocks then we also need to register those here - // so that a call to "Content" on the underlying body won't fail. - // (We'll filter these out again once we process the result of either - // Content or PartialContent.) - for _, blockS := range b.hiddenBlocks { - extSchema.Blocks = append(extSchema.Blocks, blockS) - } - - // If we have any hiddenAttrs then we also need to register these, for - // the same reason as we deal with hiddenBlocks above. 
- if len(b.hiddenAttrs) != 0 { - newAttrs := make([]hcl.AttributeSchema, len(schema.Attributes), len(schema.Attributes)+len(b.hiddenAttrs)) - copy(newAttrs, extSchema.Attributes) - for name := range b.hiddenAttrs { - newAttrs = append(newAttrs, hcl.AttributeSchema{ - Name: name, - Required: false, - }) - } - extSchema.Attributes = newAttrs - } - - return extSchema -} - -func (b *expandBody) prepareAttributes(rawAttrs hcl.Attributes) hcl.Attributes { - if len(b.hiddenAttrs) == 0 && b.iteration == nil { - // Easy path: just pass through the attrs from the original body verbatim - return rawAttrs - } - - // Otherwise we have some work to do: we must filter out any attributes - // that are hidden (since a previous PartialContent call already saw these) - // and wrap the expressions of the inner attributes so that they will - // have access to our iteration variables. - attrs := make(hcl.Attributes, len(rawAttrs)) - for name, rawAttr := range rawAttrs { - if _, hidden := b.hiddenAttrs[name]; hidden { - continue - } - if b.iteration != nil { - attr := *rawAttr // shallow copy so we can mutate it - attr.Expr = exprWrap{ - Expression: attr.Expr, - i: b.iteration, - } - attrs[name] = &attr - } else { - // If we have no active iteration then no wrapping is required. - attrs[name] = rawAttr - } - } - return attrs -} - -func (b *expandBody) expandBlocks(schema *hcl.BodySchema, rawBlocks hcl.Blocks, partial bool) (hcl.Blocks, hcl.Diagnostics) { - var blocks hcl.Blocks - var diags hcl.Diagnostics - - for _, rawBlock := range rawBlocks { - switch rawBlock.Type { - case "dynamic": - realBlockType := rawBlock.Labels[0] - if _, hidden := b.hiddenBlocks[realBlockType]; hidden { - continue - } - - var blockS *hcl.BlockHeaderSchema - for _, candidate := range schema.Blocks { - if candidate.Type == realBlockType { - blockS = &candidate - break - } - } - if blockS == nil { - // Not a block type that the caller requested. - if !partial { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported block type", - Detail: fmt.Sprintf("Blocks of type %q are not expected here.", realBlockType), - Subject: &rawBlock.LabelRanges[0], - }) - } - continue - } - - spec, specDiags := b.decodeSpec(blockS, rawBlock) - diags = append(diags, specDiags...) - if specDiags.HasErrors() { - continue - } - - if spec.forEachVal.IsKnown() { - for it := spec.forEachVal.ElementIterator(); it.Next(); { - key, value := it.Element() - i := b.iteration.MakeChild(spec.iteratorName, key, value) - - block, blockDiags := spec.newBlock(i, b.forEachCtx) - diags = append(diags, blockDiags...) - if block != nil { - // Attach our new iteration context so that attributes - // and other nested blocks can refer to our iterator. - block.Body = b.expandChild(block.Body, i) - blocks = append(blocks, block) - } - } - } else { - // If our top-level iteration value isn't known then we - // substitute an unknownBody, which will cause the entire block - // to evaluate to an unknown value. - i := b.iteration.MakeChild(spec.iteratorName, cty.DynamicVal, cty.DynamicVal) - block, blockDiags := spec.newBlock(i, b.forEachCtx) - diags = append(diags, blockDiags...) 
- if block != nil { - block.Body = unknownBody{b.expandChild(block.Body, i)} - blocks = append(blocks, block) - } - } - - default: - if _, hidden := b.hiddenBlocks[rawBlock.Type]; !hidden { - // A static block doesn't create a new iteration context, but - // it does need to inherit _our own_ iteration context in - // case it contains expressions that refer to our inherited - // iterators, or nested "dynamic" blocks. - expandedBlock := *rawBlock // shallow copy - expandedBlock.Body = b.expandChild(rawBlock.Body, b.iteration) - blocks = append(blocks, &expandedBlock) - } - } - } - - return blocks, diags -} - -func (b *expandBody) expandChild(child hcl.Body, i *iteration) hcl.Body { - chiCtx := i.EvalContext(b.forEachCtx) - ret := Expand(child, chiCtx) - ret.(*expandBody).iteration = i - return ret -} - -func (b *expandBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - // blocks aren't allowed in JustAttributes mode and this body can - // only produce blocks, so we'll just pass straight through to our - // underlying body here. - return b.original.JustAttributes() -} - -func (b *expandBody) MissingItemRange() hcl.Range { - return b.original.MissingItemRange() -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go deleted file mode 100644 index 98a51ead..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go +++ /dev/null @@ -1,215 +0,0 @@ -package dynblock - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -type expandSpec struct { - blockType string - blockTypeRange hcl.Range - defRange hcl.Range - forEachVal cty.Value - iteratorName string - labelExprs []hcl.Expression - contentBody hcl.Body - inherited map[string]*iteration -} - -func (b *expandBody) decodeSpec(blockS *hcl.BlockHeaderSchema, rawSpec *hcl.Block) (*expandSpec, hcl.Diagnostics) { - var diags hcl.Diagnostics - - var schema *hcl.BodySchema - if len(blockS.LabelNames) != 0 { - schema = dynamicBlockBodySchemaLabels - } else { - schema = dynamicBlockBodySchemaNoLabels - } - - specContent, specDiags := rawSpec.Body.Content(schema) - diags = append(diags, specDiags...) - if specDiags.HasErrors() { - return nil, diags - } - - //// for_each attribute - - eachAttr := specContent.Attributes["for_each"] - eachVal, eachDiags := eachAttr.Expr.Value(b.forEachCtx) - diags = append(diags, eachDiags...) - - if !eachVal.CanIterateElements() && eachVal.Type() != cty.DynamicPseudoType { - // We skip this error for DynamicPseudoType because that means we either - // have a null (which is checked immediately below) or an unknown - // (which is handled in the expandBody Content methods). - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic for_each value", - Detail: fmt.Sprintf("Cannot use a %s value in for_each. 
An iterable collection is required.", eachVal.Type().FriendlyName()), - Subject: eachAttr.Expr.Range().Ptr(), - Expression: eachAttr.Expr, - EvalContext: b.forEachCtx, - }) - return nil, diags - } - if eachVal.IsNull() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic for_each value", - Detail: "Cannot use a null value in for_each.", - Subject: eachAttr.Expr.Range().Ptr(), - Expression: eachAttr.Expr, - EvalContext: b.forEachCtx, - }) - return nil, diags - } - - //// iterator attribute - - iteratorName := blockS.Type - if iteratorAttr := specContent.Attributes["iterator"]; iteratorAttr != nil { - itTraversal, itDiags := hcl.AbsTraversalForExpr(iteratorAttr.Expr) - diags = append(diags, itDiags...) - if itDiags.HasErrors() { - return nil, diags - } - - if len(itTraversal) != 1 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic iterator name", - Detail: "Dynamic iterator must be a single variable name.", - Subject: itTraversal.SourceRange().Ptr(), - }) - return nil, diags - } - - iteratorName = itTraversal.RootName() - } - - var labelExprs []hcl.Expression - if labelsAttr := specContent.Attributes["labels"]; labelsAttr != nil { - var labelDiags hcl.Diagnostics - labelExprs, labelDiags = hcl.ExprList(labelsAttr.Expr) - diags = append(diags, labelDiags...) - if labelDiags.HasErrors() { - return nil, diags - } - - if len(labelExprs) > len(blockS.LabelNames) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous dynamic block label", - Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), - Subject: labelExprs[len(blockS.LabelNames)].Range().Ptr(), - }) - return nil, diags - } else if len(labelExprs) < len(blockS.LabelNames) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Insufficient dynamic block labels", - Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), - Subject: labelsAttr.Expr.Range().Ptr(), - }) - return nil, diags - } - } - - // Since our schema requests only blocks of type "content", we can assume - // that all entries in specContent.Blocks are content blocks. - if len(specContent.Blocks) == 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing dynamic content block", - Detail: "A dynamic block must have a nested block of type \"content\" to describe the body of each generated block.", - Subject: &specContent.MissingItemRange, - }) - return nil, diags - } - if len(specContent.Blocks) > 1 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous dynamic content block", - Detail: "Only one nested content block is allowed for each dynamic block.", - Subject: &specContent.Blocks[1].DefRange, - }) - return nil, diags - } - - return &expandSpec{ - blockType: blockS.Type, - blockTypeRange: rawSpec.LabelRanges[0], - defRange: rawSpec.DefRange, - forEachVal: eachVal, - iteratorName: iteratorName, - labelExprs: labelExprs, - contentBody: specContent.Blocks[0].Body, - }, diags -} - -func (s *expandSpec) newBlock(i *iteration, ctx *hcl.EvalContext) (*hcl.Block, hcl.Diagnostics) { - var diags hcl.Diagnostics - var labels []string - var labelRanges []hcl.Range - lCtx := i.EvalContext(ctx) - for _, labelExpr := range s.labelExprs { - labelVal, labelDiags := labelExpr.Value(lCtx) - diags = append(diags, labelDiags...) 
- if labelDiags.HasErrors() { - return nil, diags - } - - var convErr error - labelVal, convErr = convert.Convert(labelVal, cty.String) - if convErr != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic block label", - Detail: fmt.Sprintf("Cannot use this value as a dynamic block label: %s.", convErr), - Subject: labelExpr.Range().Ptr(), - Expression: labelExpr, - EvalContext: lCtx, - }) - return nil, diags - } - if labelVal.IsNull() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic block label", - Detail: "Cannot use a null value as a dynamic block label.", - Subject: labelExpr.Range().Ptr(), - Expression: labelExpr, - EvalContext: lCtx, - }) - return nil, diags - } - if !labelVal.IsKnown() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic block label", - Detail: "This value is not yet known. Dynamic block labels must be immediately-known values.", - Subject: labelExpr.Range().Ptr(), - Expression: labelExpr, - EvalContext: lCtx, - }) - return nil, diags - } - - labels = append(labels, labelVal.AsString()) - labelRanges = append(labelRanges, labelExpr.Range()) - } - - block := &hcl.Block{ - Type: s.blockType, - TypeRange: s.blockTypeRange, - Labels: labels, - LabelRanges: labelRanges, - DefRange: s.defRange, - Body: s.contentBody, - } - - return block, diags -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go deleted file mode 100644 index 460a1d2a..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go +++ /dev/null @@ -1,42 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -type exprWrap struct { - hcl.Expression - i *iteration -} - -func (e exprWrap) Variables() []hcl.Traversal { - raw := e.Expression.Variables() - ret := make([]hcl.Traversal, 0, len(raw)) - - // Filter out traversals that refer to our iterator name or any - // iterator we've inherited; we're going to provide those in - // our Value wrapper, so the caller doesn't need to know about them. - for _, traversal := range raw { - rootName := traversal.RootName() - if rootName == e.i.IteratorName { - continue - } - if _, inherited := e.i.Inherited[rootName]; inherited { - continue - } - ret = append(ret, traversal) - } - return ret -} - -func (e exprWrap) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - extCtx := e.i.EvalContext(ctx) - return e.Expression.Value(extCtx) -} - -// UnwrapExpression returns the expression being wrapped by this instance. -// This allows the original expression to be recovered by hcl.UnwrapExpression. 
-func (e exprWrap) UnwrapExpression() hcl.Expression { - return e.Expression -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go deleted file mode 100644 index c5663886..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go +++ /dev/null @@ -1,66 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -type iteration struct { - IteratorName string - Key cty.Value - Value cty.Value - Inherited map[string]*iteration -} - -func (s *expandSpec) MakeIteration(key, value cty.Value) *iteration { - return &iteration{ - IteratorName: s.iteratorName, - Key: key, - Value: value, - Inherited: s.inherited, - } -} - -func (i *iteration) Object() cty.Value { - return cty.ObjectVal(map[string]cty.Value{ - "key": i.Key, - "value": i.Value, - }) -} - -func (i *iteration) EvalContext(base *hcl.EvalContext) *hcl.EvalContext { - new := base.NewChild() - - if i != nil { - new.Variables = map[string]cty.Value{} - for name, otherIt := range i.Inherited { - new.Variables[name] = otherIt.Object() - } - new.Variables[i.IteratorName] = i.Object() - } - - return new -} - -func (i *iteration) MakeChild(iteratorName string, key, value cty.Value) *iteration { - if i == nil { - // Create entirely new root iteration, then - return &iteration{ - IteratorName: iteratorName, - Key: key, - Value: value, - } - } - - inherited := map[string]*iteration{} - for name, otherIt := range i.Inherited { - inherited[name] = otherIt - } - inherited[i.IteratorName] = i - return &iteration{ - IteratorName: iteratorName, - Key: key, - Value: value, - Inherited: inherited, - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go deleted file mode 100644 index a5bfd94e..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go +++ /dev/null @@ -1,47 +0,0 @@ -// Package dynblock provides an extension to HCL that allows dynamic -// declaration of nested blocks in certain contexts via a special block type -// named "dynamic". -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" -) - -// Expand "dynamic" blocks in the given body, returning a new body that -// has those blocks expanded. -// -// The given EvalContext is used when evaluating "for_each" and "labels" -// attributes within dynamic blocks, allowing those expressions access to -// variables and functions beyond the iterator variable created by the -// iteration. -// -// Expand returns no diagnostics because no blocks are actually expanded -// until a call to Content or PartialContent on the returned body, which -// will then expand only the blocks selected by the schema. -// -// "dynamic" blocks are also expanded automatically within nested blocks -// in the given body, including within other dynamic blocks, thus allowing -// multi-dimensional iteration. However, it is not possible to -// dynamically-generate the "dynamic" blocks themselves except through nesting. 
-// -// parent { -// dynamic "child" { -// for_each = child_objs -// content { -// dynamic "grandchild" { -// for_each = child.value.children -// labels = [grandchild.key] -// content { -// parent_key = child.key -// value = grandchild.value -// } -// } -// } -// } -// } -func Expand(body hcl.Body, ctx *hcl.EvalContext) hcl.Body { - return &expandBody{ - original: body, - forEachCtx: ctx, - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go deleted file mode 100644 index b3907d6e..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go +++ /dev/null @@ -1,50 +0,0 @@ -package dynblock - -import "github.com/hashicorp/hcl/v2" - -var dynamicBlockHeaderSchema = hcl.BlockHeaderSchema{ - Type: "dynamic", - LabelNames: []string{"type"}, -} - -var dynamicBlockBodySchemaLabels = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "for_each", - Required: true, - }, - { - Name: "iterator", - Required: false, - }, - { - Name: "labels", - Required: true, - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "content", - LabelNames: nil, - }, - }, -} - -var dynamicBlockBodySchemaNoLabels = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "for_each", - Required: true, - }, - { - Name: "iterator", - Required: false, - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "content", - LabelNames: nil, - }, - }, -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go deleted file mode 100644 index caa20853..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go +++ /dev/null @@ -1,89 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// unknownBody is a funny body that just reports everything inside it as -// unknown. It uses a given other body as a sort of template for what attributes -// and blocks are inside -- including source location information -- but -// subsitutes unknown values of unknown type for all attributes. -// -// This rather odd process is used to handle expansion of dynamic blocks whose -// for_each expression is unknown. Since a block cannot itself be unknown, -// we instead arrange for everything _inside_ the block to be unknown instead, -// to give the best possible approximation. -type unknownBody struct { - template hcl.Body -} - -var _ hcl.Body = unknownBody{} - -// hcldec.UnkownBody impl -func (b unknownBody) Unknown() bool { - return true -} - -func (b unknownBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - content, diags := b.template.Content(schema) - content = b.fixupContent(content) - - // We're intentionally preserving the diagnostics reported from the - // inner body so that we can still report where the template body doesn't - // match the requested schema. - return content, diags -} - -func (b unknownBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - content, remain, diags := b.template.PartialContent(schema) - content = b.fixupContent(content) - remain = unknownBody{remain} // remaining content must also be wrapped - - // We're intentionally preserving the diagnostics reported from the - // inner body so that we can still report where the template body doesn't - // match the requested schema. 
- return content, remain, diags -} - -func (b unknownBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - attrs, diags := b.template.JustAttributes() - attrs = b.fixupAttrs(attrs) - - // We're intentionally preserving the diagnostics reported from the - // inner body so that we can still report where the template body doesn't - // match the requested schema. - return attrs, diags -} - -func (b unknownBody) MissingItemRange() hcl.Range { - return b.template.MissingItemRange() -} - -func (b unknownBody) fixupContent(got *hcl.BodyContent) *hcl.BodyContent { - ret := &hcl.BodyContent{} - ret.Attributes = b.fixupAttrs(got.Attributes) - if len(got.Blocks) > 0 { - ret.Blocks = make(hcl.Blocks, 0, len(got.Blocks)) - for _, gotBlock := range got.Blocks { - new := *gotBlock // shallow copy - new.Body = unknownBody{gotBlock.Body} // nested content must also be marked unknown - ret.Blocks = append(ret.Blocks, &new) - } - } - - return ret -} - -func (b unknownBody) fixupAttrs(got hcl.Attributes) hcl.Attributes { - if len(got) == 0 { - return nil - } - ret := make(hcl.Attributes, len(got)) - for name, gotAttr := range got { - new := *gotAttr // shallow copy - new.Expr = hcl.StaticExpr(cty.DynamicVal, gotAttr.Expr.Range()) - ret[name] = &new - } - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go deleted file mode 100644 index 19233929..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go +++ /dev/null @@ -1,209 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// WalkVariables begins the recursive process of walking all expressions and -// nested blocks in the given body and its child bodies while taking into -// account any "dynamic" blocks. -// -// This function requires that the caller walk through the nested block -// structure in the given body level-by-level so that an appropriate schema -// can be provided at each level to inform further processing. This workflow -// is thus easiest to use for calling applications that have some higher-level -// schema representation available with which to drive this multi-step -// process. If your application uses the hcldec package, you may be able to -// use VariablesHCLDec instead for a more automatic approach. -func WalkVariables(body hcl.Body) WalkVariablesNode { - return WalkVariablesNode{ - body: body, - includeContent: true, - } -} - -// WalkExpandVariables is like Variables but it includes only the variables -// required for successful block expansion, ignoring any variables referenced -// inside block contents. The result is the minimal set of all variables -// required for a call to Expand, excluding variables that would only be -// needed to subsequently call Content or PartialContent on the expanded -// body. -func WalkExpandVariables(body hcl.Body) WalkVariablesNode { - return WalkVariablesNode{ - body: body, - } -} - -type WalkVariablesNode struct { - body hcl.Body - it *iteration - - includeContent bool -} - -type WalkVariablesChild struct { - BlockTypeName string - Node WalkVariablesNode -} - -// Body returns the HCL Body associated with the child node, in case the caller -// wants to do some sort of inspection of it in order to decide what schema -// to pass to Visit. -// -// Most implementations should just fetch a fixed schema based on the -// BlockTypeName field and not access this. 
Deciding on a schema dynamically -// based on the body is a strange thing to do and generally necessary only if -// your caller is already doing other bizarre things with HCL bodies. -func (c WalkVariablesChild) Body() hcl.Body { - return c.Node.body -} - -// Visit returns the variable traversals required for any "dynamic" blocks -// directly in the body associated with this node, and also returns any child -// nodes that must be visited in order to continue the walk. -// -// Each child node has its associated block type name given in its BlockTypeName -// field, which the calling application should use to determine the appropriate -// schema for the content of each child node and pass it to the child node's -// own Visit method to continue the walk recursively. -func (n WalkVariablesNode) Visit(schema *hcl.BodySchema) (vars []hcl.Traversal, children []WalkVariablesChild) { - extSchema := n.extendSchema(schema) - container, _, _ := n.body.PartialContent(extSchema) - if container == nil { - return vars, children - } - - children = make([]WalkVariablesChild, 0, len(container.Blocks)) - - if n.includeContent { - for _, attr := range container.Attributes { - for _, traversal := range attr.Expr.Variables() { - var ours, inherited bool - if n.it != nil { - ours = traversal.RootName() == n.it.IteratorName - _, inherited = n.it.Inherited[traversal.RootName()] - } - - if !(ours || inherited) { - vars = append(vars, traversal) - } - } - } - } - - for _, block := range container.Blocks { - switch block.Type { - - case "dynamic": - blockTypeName := block.Labels[0] - inner, _, _ := block.Body.PartialContent(variableDetectionInnerSchema) - if inner == nil { - continue - } - - iteratorName := blockTypeName - if attr, exists := inner.Attributes["iterator"]; exists { - iterTraversal, _ := hcl.AbsTraversalForExpr(attr.Expr) - if len(iterTraversal) == 0 { - // Ignore this invalid dynamic block, since it'll produce - // an error if someone tries to extract content from it - // later anyway. - continue - } - iteratorName = iterTraversal.RootName() - } - blockIt := n.it.MakeChild(iteratorName, cty.DynamicVal, cty.DynamicVal) - - if attr, exists := inner.Attributes["for_each"]; exists { - // Filter out iterator names inherited from parent blocks - for _, traversal := range attr.Expr.Variables() { - if _, inherited := blockIt.Inherited[traversal.RootName()]; !inherited { - vars = append(vars, traversal) - } - } - } - if attr, exists := inner.Attributes["labels"]; exists { - // Filter out both our own iterator name _and_ those inherited - // from parent blocks, since we provide _both_ of these to the - // label expressions. - for _, traversal := range attr.Expr.Variables() { - ours := traversal.RootName() == iteratorName - _, inherited := blockIt.Inherited[traversal.RootName()] - - if !(ours || inherited) { - vars = append(vars, traversal) - } - } - } - - for _, contentBlock := range inner.Blocks { - // We only request "content" blocks in our schema, so we know - // any blocks we find here will be content blocks. We require - // exactly one content block for actual expansion, but we'll - // be more liberal here so that callers can still collect - // variables from erroneous "dynamic" blocks. 
- children = append(children, WalkVariablesChild{ - BlockTypeName: blockTypeName, - Node: WalkVariablesNode{ - body: contentBlock.Body, - it: blockIt, - includeContent: n.includeContent, - }, - }) - } - - default: - children = append(children, WalkVariablesChild{ - BlockTypeName: block.Type, - Node: WalkVariablesNode{ - body: block.Body, - it: n.it, - includeContent: n.includeContent, - }, - }) - - } - } - - return vars, children -} - -func (n WalkVariablesNode) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { - // We augment the requested schema to also include our special "dynamic" - // block type, since then we'll get instances of it interleaved with - // all of the literal child blocks we must also include. - extSchema := &hcl.BodySchema{ - Attributes: schema.Attributes, - Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), - } - copy(extSchema.Blocks, schema.Blocks) - extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) - - return extSchema -} - -// This is a more relaxed schema than what's in schema.go, since we -// want to maximize the amount of variables we can find even if there -// are erroneous blocks. -var variableDetectionInnerSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "for_each", - Required: false, - }, - { - Name: "labels", - Required: false, - }, - { - Name: "iterator", - Required: false, - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "content", - }, - }, -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go deleted file mode 100644 index 907ef3eb..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go +++ /dev/null @@ -1,43 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" -) - -// VariablesHCLDec is a wrapper around WalkVariables that uses the given hcldec -// specification to automatically drive the recursive walk through nested -// blocks in the given body. -// -// This is a drop-in replacement for hcldec.Variables which is able to treat -// blocks of type "dynamic" in the same special way that dynblock.Expand would, -// exposing both the variables referenced in the "for_each" and "labels" -// arguments and variables used in the nested "content" block. -func VariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { - rootNode := WalkVariables(body) - return walkVariablesWithHCLDec(rootNode, spec) -} - -// ExpandVariablesHCLDec is like VariablesHCLDec but it includes only the -// minimal set of variables required to call Expand, ignoring variables that -// are referenced only inside normal block contents. See WalkExpandVariables -// for more information. -func ExpandVariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { - rootNode := WalkExpandVariables(body) - return walkVariablesWithHCLDec(rootNode, spec) -} - -func walkVariablesWithHCLDec(node WalkVariablesNode, spec hcldec.Spec) []hcl.Traversal { - vars, children := node.Visit(hcldec.ImpliedSchema(spec)) - - if len(children) > 0 { - childSpecs := hcldec.ChildBlockTypes(spec) - for _, child := range children { - if childSpec, exists := childSpecs[child.BlockTypeName]; exists { - vars = append(vars, walkVariablesWithHCLDec(child.Node, childSpec)...) 
-			}
-		}
-	}
-
-	return vars
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/README.md
deleted file mode 100644
index 5d56eeca..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/README.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# "Try" and "can" functions
-
-This Go package contains two `cty` functions intended for use in an
-`hcl.EvalContext` when evaluating HCL native syntax expressions.
-
-The first function `try` attempts to evaluate each of its argument expressions
-in order until one produces a result without any errors.
-
-```hcl
-try(non_existent_variable, 2) # returns 2
-```
-
-If none of the expressions succeed, the function call fails with all of the
-errors it encountered.
-
-The second function `can` is similar except that it ignores the result of
-the given expression altogether and simply returns `true` if the expression
-produced a successful result or `false` if it produced errors.
-
-Both of these are primarily intended for working with deep data structures
-which might not have a dependable shape. For example, we can use `try` to
-attempt to fetch a value from deep inside a data structure but produce a
-default value if any step of the traversal fails:
-
-```hcl
-result = try(foo.deep[0].lots.of["traversals"], null)
-```
-
-The final result to `try` should generally be some sort of constant value that
-will always evaluate successfully.
-
-## Using these functions
-
-Languages built on HCL can make `try` and `can` available to user code by
-exporting them in the `hcl.EvalContext` used for expression evaluation:
-
-```go
-ctx := &hcl.EvalContext{
-	Functions: map[string]function.Function{
-		"try": tryfunc.TryFunc,
-		"can": tryfunc.CanFunc,
-	},
-}
-```
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/tryfunc.go b/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/tryfunc.go
deleted file mode 100644
index 2f4862f4..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/tryfunc.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Package tryfunc contains some optional functions that can be exposed in
-// HCL-based languages to allow authors to test whether a particular expression
-// can succeed and take dynamic action based on that result.
-//
-// These functions are implemented in terms of the customdecode extension from
-// the sibling directory "customdecode", and so they are only useful when
-// used within an HCL EvalContext. Other systems using cty functions are
-// unlikely to support the HCL-specific "customdecode" extension.
-package tryfunc
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-
-	"github.com/hashicorp/hcl/v2"
-	"github.com/hashicorp/hcl/v2/ext/customdecode"
-	"github.com/zclconf/go-cty/cty"
-	"github.com/zclconf/go-cty/cty/function"
-)
-
-// TryFunc is a variadic function that tries to evaluate all of its arguments
-// in sequence until one succeeds, in which case it returns that result, or
-// returns an error if none of them succeed.
-var TryFunc function.Function
-
-// CanFunc tries to evaluate the expression given in its first argument.
-var CanFunc function.Function - -func init() { - TryFunc = function.New(&function.Spec{ - VarParam: &function.Parameter{ - Name: "expressions", - Type: customdecode.ExpressionClosureType, - }, - Type: func(args []cty.Value) (cty.Type, error) { - v, err := try(args) - if err != nil { - return cty.NilType, err - } - return v.Type(), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return try(args) - }, - }) - CanFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "expression", - Type: customdecode.ExpressionClosureType, - }, - }, - Type: function.StaticReturnType(cty.Bool), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return can(args[0]) - }, - }) -} - -func try(args []cty.Value) (cty.Value, error) { - if len(args) == 0 { - return cty.NilVal, errors.New("at least one argument is required") - } - - // We'll collect up all of the diagnostics we encounter along the way - // and report them all if none of the expressions succeed, so that the - // user might get some hints on how to make at least one succeed. - var diags hcl.Diagnostics - for _, arg := range args { - closure := customdecode.ExpressionClosureFromVal(arg) - if dependsOnUnknowns(closure.Expression, closure.EvalContext) { - // We can't safely decide if this expression will succeed yet, - // and so our entire result must be unknown until we have - // more information. - return cty.DynamicVal, nil - } - - v, moreDiags := closure.Value() - diags = append(diags, moreDiags...) - if moreDiags.HasErrors() { - continue // try the next one, if there is one to try - } - return v, nil // ignore any accumulated diagnostics if one succeeds - } - - // If we fall out here then none of the expressions succeeded, and so - // we must have at least one diagnostic and we'll return all of them - // so that the user can see the errors related to whichever one they - // were expecting to have succeeded in this case. - // - // Because our function must return a single error value rather than - // diagnostics, we'll construct a suitable error message string - // that will make sense in the context of the function call failure - // diagnostic HCL will eventually wrap this in. - var buf strings.Builder - buf.WriteString("no expression succeeded:\n") - for _, diag := range diags { - if diag.Subject != nil { - buf.WriteString(fmt.Sprintf("- %s (at %s)\n %s\n", diag.Summary, diag.Subject, diag.Detail)) - } else { - buf.WriteString(fmt.Sprintf("- %s\n %s\n", diag.Summary, diag.Detail)) - } - } - buf.WriteString("\nAt least one expression must produce a successful result") - return cty.NilVal, errors.New(buf.String()) -} - -func can(arg cty.Value) (cty.Value, error) { - closure := customdecode.ExpressionClosureFromVal(arg) - if dependsOnUnknowns(closure.Expression, closure.EvalContext) { - // Can't decide yet, then. - return cty.UnknownVal(cty.Bool), nil - } - - _, diags := closure.Value() - if diags.HasErrors() { - return cty.False, nil - } - return cty.True, nil -} - -// dependsOnUnknowns returns true if any of the variables that the given -// expression might access are unknown values or contain unknown values. -// -// This is a conservative result that prefers to return true if there's any -// chance that the expression might derive from an unknown value during its -// evaluation; it is likely to produce false-positives for more complex -// expressions involving deep data structures. 
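To see this conservative behaviour from a caller's perspective, here is a minimal, self-contained sketch (illustrative only, not part of the vendored file; the variable name `foo` is invented for the example). It exports `TryFunc` and `CanFunc` through an `hcl.EvalContext` and evaluates `can()` against a wholly unknown value, so the result is an unknown bool rather than a definite answer:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/tryfunc"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

func main() {
	expr, diags := hclsyntax.ParseExpression([]byte(`can(foo.bar)`), "example.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			// "foo" (invented for this sketch) is wholly unknown, so can()
			// cannot decide yet and returns an unknown bool rather than
			// true or false, per the guard described above.
			"foo": cty.UnknownVal(cty.Object(map[string]cty.Type{"bar": cty.String})),
		},
		Functions: map[string]function.Function{
			"try": tryfunc.TryFunc,
			"can": tryfunc.CanFunc,
		},
	}

	v, moreDiags := expr.Value(ctx)
	if moreDiags.HasErrors() {
		panic(moreDiags.Error())
	}
	fmt.Println(v.IsKnown()) // false
}
```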
-func dependsOnUnknowns(expr hcl.Expression, ctx *hcl.EvalContext) bool {
-	for _, traversal := range expr.Variables() {
-		val, diags := traversal.TraverseAbs(ctx)
-		if diags.HasErrors() {
-			// If the traversal returned a definitive error then it must
-			// not traverse through any unknowns.
-			continue
-		}
-		if !val.IsWhollyKnown() {
-			// The value will be unknown if either it refers directly to
-			// an unknown value or if the traversal moves through an unknown
-			// collection. We're using IsWhollyKnown, so this also catches
-			// situations where the traversal refers to a compound data
-			// structure that contains any unknown values. That's important,
-			// because during evaluation the expression might evaluate more
-			// deeply into this structure and encounter the unknowns.
-			return true
-		}
-	}
-	return false
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md
deleted file mode 100644
index c0fa6ab8..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md
+++ /dev/null
@@ -1,156 +0,0 @@
-# HCL Type Expressions Extension
-
-This HCL extension defines a convention for describing HCL types using function
-call and variable reference syntax, allowing configuration formats to include
-type information provided by users.
-
-The type syntax is processed statically from a hcl.Expression, so it cannot
-use any of the usual language operators. This is similar to type expressions
-in statically-typed programming languages.
-
-```hcl
-variable "example" {
-  type = list(string)
-}
-```
-
-The extension is built using the `hcl.ExprAsKeyword` and `hcl.ExprCall`
-functions, and so it relies on the underlying syntax to define how "keyword"
-and "call" are interpreted. The above shows how they are interpreted in
-the HCL native syntax, while the following shows the same information
-expressed in JSON:
-
-```json
-{
-  "variable": {
-    "example": {
-      "type": "list(string)"
-    }
-  }
-}
-```
-
-Notice that since we have additional contextual information that we intend
-to allow only calls and keywords the JSON syntax is able to parse the given
-string directly as an expression, rather than as a template as would be
-the case for normal expression evaluation.
-
-For more information, see [the godoc reference](http://godoc.org/github.com/hashicorp/hcl/v2/ext/typeexpr).
-
-## Type Expression Syntax
-
-When expressed in the native syntax, the following expressions are permitted
-in a type expression:
-
-* `string` - string
-* `bool` - boolean
-* `number` - number
-* `any` - `cty.DynamicPseudoType` (in function `TypeConstraint` only)
-* `list(<TYPE>)` - list of the type given as an argument
-* `set(<TYPE>)` - set of the type given as an argument
-* `map(<TYPE>)` - map of the type given as an argument
-* `tuple([<TYPES...>])` - tuple with the element types given in the single list argument
-* `object({<ATTR NAME> = <TYPE>, ...})` - object with the attributes and corresponding types given in the single map argument
-
-For example:
-
-* `list(string)`
-* `object({name=string,age=number})`
-* `map(object({name=string,age=number}))`
-
-Note that the object constructor syntax is not fully-general for all possible
-object types because it requires the attribute names to be valid identifiers.
-In practice it is expected that any time an object type is being fixed for -type checking it will be one that has identifiers as its attributes; object -types with weird attributes generally show up only from arbitrary object -constructors in configuration files, which are usually treated either as maps -or as the dynamic pseudo-type. - -### Optional Object Attributes - -As part of object expressions attributes can be marked as optional. Missing -object attributes would typically result in an error when type constraints are -validated or used. Optional missing attributes, however, would not result in an -error. The `cty` ["convert" function](#the-convert-cty-function) will populate -missing optional attributes with null values. - -For example: - -* `object({name=string,age=optional(number)})` - -Optional attributes can also be specified with default values. The -`TypeConstraintWithDefaults` function will return a `Defaults` object that can -be used to populate missing optional attributes with defaults in a given -`cty.Value`. - -For example: - -* `object({name=string,age=optional(number, 0)})` - -## Type Constraints as Values - -Along with defining a convention for writing down types using HCL expression -constructs, this package also includes a mechanism for representing types as -values that can be used as data within an HCL-based language. - -`typeexpr.TypeConstraintType` is a -[`cty` capsule type](https://github.com/zclconf/go-cty/blob/master/docs/types.md#capsule-types) -that encapsulates `cty.Type` values. You can construct such a value directly -using the `TypeConstraintVal` function: - -```go -tyVal := typeexpr.TypeConstraintVal(cty.String) - -// We can unpack the type from a value using TypeConstraintFromVal -ty := typeExpr.TypeConstraintFromVal(tyVal) -``` - -However, the primary purpose of `typeexpr.TypeConstraintType` is to be -specified as the type constraint for an argument, in which case it serves -as a signal for HCL to treat the argument expression as a type constraint -expression as defined above, rather than as a normal value expression. - -"An argument" in the above in practice means the following two locations: - -* As the type constraint for a parameter of a cty function that will be - used in an `hcl.EvalContext`. In that case, function calls in the HCL - native expression syntax will require the argument to be valid type constraint - expression syntax and the function implementation will receive a - `TypeConstraintType` value as the argument value for that parameter. - -* As the type constraint for a `hcldec.AttrSpec` or `hcldec.BlockAttrsSpec` - when decoding an HCL body using `hcldec`. In that case, the attributes - with that type constraint will be required to be valid type constraint - expression syntax and the result will be a `TypeConstraintType` value. - -Note that the special handling of these arguments means that an argument -marked in this way must use the type constraint syntax directly. It is not -valid to pass in a value of `TypeConstraintType` that has been obtained -dynamically via some other expression result. - -`TypeConstraintType` is provided with the intent of using it internally within -application code when incorporating type constraint expression syntax into -an HCL-based language, not to be used for dynamic "programming with types". A -calling application could support programming with types by defining its _own_ -capsule type, but that is not the purpose of `TypeConstraintType`. 
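As a concrete illustration of the first of those two locations, here is a hedged sketch of a custom cty function whose parameter uses `TypeConstraintType`; the function and the `TypeNameFunc` name are invented for this example and are not part of the package:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/ext/typeexpr"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

// TypeNameFunc (hypothetical) renders its type-constraint argument as a
// string. Because the parameter type is typeexpr.TypeConstraintType, HCL
// native syntax would decode the argument as a type expression such as
// list(string) rather than as a value expression.
var TypeNameFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{Name: "type", Type: typeexpr.TypeConstraintType},
	},
	Type: function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		ty := typeexpr.TypeConstraintFromVal(args[0])
		return cty.StringVal(typeexpr.TypeString(ty)), nil
	},
})

func main() {
	// Direct Go-side call for demonstration; in practice the function would
	// be exported through an hcl.EvalContext's Functions map.
	v, err := TypeNameFunc.Call([]cty.Value{
		typeexpr.TypeConstraintVal(cty.List(cty.String)),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AsString()) // list(string)
}
```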
-
-## The "convert" `cty` Function
-
-Building on the `TypeConstraintType` described in the previous section, this
-package also provides `typeexpr.ConvertFunc` which is a cty function that
-can be placed into an `hcl.EvalContext` (conventionally named "convert") in
-order to provide a general type conversion function in an HCL-based language:
-
-```hcl
-  foo = convert("true", bool)
-```
-
-The second parameter uses the mechanism described in the previous section to
-require its argument to be a type constraint expression rather than a value
-expression. In doing so, it allows converting with any type constraint that
-can be expressed in this package's type constraint syntax. In the above example,
-the `foo` argument would receive a boolean true, or `cty.True` in `cty` terms.
-
-The target type constraint must always be provided statically using inline
-type constraint syntax. There is no way to _dynamically_ select a type
-constraint using this function.
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/defaults.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/defaults.go
deleted file mode 100644
index f2962139..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/defaults.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package typeexpr
-
-import (
-	"sort"
-	"strconv"
-
-	"github.com/zclconf/go-cty/cty"
-	"github.com/zclconf/go-cty/cty/convert"
-)
-
-// Defaults represents a type tree which may contain default values for
-// optional object attributes at any level. This is used to apply nested
-// defaults to a given cty.Value before converting it to a concrete type.
-type Defaults struct {
-	// Type of the node for which these defaults apply. This is necessary in
-	// order to determine how to inspect the Defaults and Children collections.
-	Type cty.Type
-
-	// DefaultValues contains the default values for each object attribute,
-	// indexed by attribute name.
-	DefaultValues map[string]cty.Value
-
-	// Children is a map of Defaults for elements contained in this type. This
-	// only applies to structural and collection types.
-	//
-	// The map is indexed by string instead of cty.Value because cty.Number
-	// instances are non-comparable, due to embedding a *big.Float.
-	//
-	// Collections have a single element type, which is stored at key "".
-	Children map[string]*Defaults
-}
-
-// Apply walks the given value, applying specified defaults wherever optional
-// attributes are missing. The input and output values may have different
-// types, and the result may still require type conversion to the final desired
-// type.
-//
-// This function is permissive and does not report errors, assuming that the
-// caller will have better context to report useful type conversion failure
-// diagnostics.
-func (d *Defaults) Apply(val cty.Value) cty.Value {
-	return d.apply(val)
-}
-
-func (d *Defaults) apply(v cty.Value) cty.Value {
-	// We don't apply defaults to null values or unknown values. To be clear,
-	// we will overwrite children values with defaults if they are null but not
-	// if the actual value is null.
-	if !v.IsKnown() || v.IsNull() {
-		return v
-	}
-
-	// Also, do nothing if we have no defaults to apply.
- if len(d.DefaultValues) == 0 && len(d.Children) == 0 { - return v - } - - v, marks := v.Unmark() - - switch { - case v.Type().IsSetType(), v.Type().IsListType(), v.Type().IsTupleType(): - values := d.applyAsSlice(v) - - if v.Type().IsSetType() { - if len(values) == 0 { - v = cty.SetValEmpty(v.Type().ElementType()) - break - } - if converts := d.unifyAsSlice(values); len(converts) > 0 { - v = cty.SetVal(converts).WithMarks(marks) - break - } - } else if v.Type().IsListType() { - if len(values) == 0 { - v = cty.ListValEmpty(v.Type().ElementType()) - break - } - if converts := d.unifyAsSlice(values); len(converts) > 0 { - v = cty.ListVal(converts) - break - } - } - v = cty.TupleVal(values) - case v.Type().IsObjectType(), v.Type().IsMapType(): - values := d.applyAsMap(v) - - for key, defaultValue := range d.DefaultValues { - if value, ok := values[key]; !ok || value.IsNull() { - if defaults, ok := d.Children[key]; ok { - values[key] = defaults.apply(defaultValue) - continue - } - values[key] = defaultValue - } - } - - if v.Type().IsMapType() { - if len(values) == 0 { - v = cty.MapValEmpty(v.Type().ElementType()) - break - } - if converts := d.unifyAsMap(values); len(converts) > 0 { - v = cty.MapVal(converts) - break - } - } - v = cty.ObjectVal(values) - } - - return v.WithMarks(marks) -} - -func (d *Defaults) applyAsSlice(value cty.Value) []cty.Value { - var elements []cty.Value - for ix, element := range value.AsValueSlice() { - if childDefaults := d.getChild(ix); childDefaults != nil { - element = childDefaults.apply(element) - elements = append(elements, element) - continue - } - elements = append(elements, element) - } - return elements -} - -func (d *Defaults) applyAsMap(value cty.Value) map[string]cty.Value { - elements := make(map[string]cty.Value) - for key, element := range value.AsValueMap() { - if childDefaults := d.getChild(key); childDefaults != nil { - elements[key] = childDefaults.apply(element) - continue - } - elements[key] = element - } - return elements -} - -func (d *Defaults) getChild(key interface{}) *Defaults { - switch { - case d.Type.IsMapType(), d.Type.IsSetType(), d.Type.IsListType(): - return d.Children[""] - case d.Type.IsTupleType(): - return d.Children[strconv.Itoa(key.(int))] - case d.Type.IsObjectType(): - return d.Children[key.(string)] - default: - return nil - } -} - -func (d *Defaults) unifyAsSlice(values []cty.Value) []cty.Value { - var types []cty.Type - for _, value := range values { - types = append(types, value.Type()) - } - unify, conversions := convert.UnifyUnsafe(types) - if unify == cty.NilType { - return nil - } - - var converts []cty.Value - for ix := 0; ix < len(conversions); ix++ { - if conversions[ix] == nil { - converts = append(converts, values[ix]) - continue - } - - converted, err := conversions[ix](values[ix]) - if err != nil { - return nil - } - converts = append(converts, converted) - } - return converts -} - -func (d *Defaults) unifyAsMap(values map[string]cty.Value) map[string]cty.Value { - var keys []string - for key := range values { - keys = append(keys, key) - } - sort.Strings(keys) - - var types []cty.Type - for _, key := range keys { - types = append(types, values[key].Type()) - } - unify, conversions := convert.UnifyUnsafe(types) - if unify == cty.NilType { - return nil - } - - converts := make(map[string]cty.Value) - for ix, key := range keys { - if conversions[ix] == nil { - converts[key] = values[key] - continue - } - - var err error - if converts[key], err = conversions[ix](values[key]); err != nil { - return nil - } - } 
- return converts -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go deleted file mode 100644 index c4b37957..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package typeexpr extends HCL with a convention for describing HCL types -// within configuration files. -// -// The type syntax is processed statically from a hcl.Expression, so it cannot -// use any of the usual language operators. This is similar to type expressions -// in statically-typed programming languages. -// -// variable "example" { -// type = list(string) -// } -package typeexpr diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go deleted file mode 100644 index 890cf8ed..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go +++ /dev/null @@ -1,343 +0,0 @@ -package typeexpr - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -const invalidTypeSummary = "Invalid type specification" - -// getType is the internal implementation of Type, TypeConstraint, and -// TypeConstraintWithDefaults, using the passed flags to distinguish. When -// `constraint` is true, the "any" keyword can be used in place of a concrete -// type. When `withDefaults` is true, the "optional" call expression supports -// an additional argument describing a default value. -func getType(expr hcl.Expression, constraint, withDefaults bool) (cty.Type, *Defaults, hcl.Diagnostics) { - // First we'll try for one of our keywords - kw := hcl.ExprAsKeyword(expr) - switch kw { - case "bool": - return cty.Bool, nil, nil - case "string": - return cty.String, nil, nil - case "number": - return cty.Number, nil, nil - case "any": - if constraint { - return cty.DynamicPseudoType, nil, nil - } - return cty.DynamicPseudoType, nil, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The keyword %q cannot be used in this type specification: an exact type is required.", kw), - Subject: expr.Range().Ptr(), - }} - case "list", "map", "set": - return cty.DynamicPseudoType, nil, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", kw), - Subject: expr.Range().Ptr(), - }} - case "object": - return cty.DynamicPseudoType, nil, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", - Subject: expr.Range().Ptr(), - }} - case "tuple": - return cty.DynamicPseudoType, nil, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The tuple type constructor requires one argument specifying the element types as a list.", - Subject: expr.Range().Ptr(), - }} - case "": - // okay! we'll fall through and try processing as a call, then. - default: - return cty.DynamicPseudoType, nil, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The keyword %q is not a valid type specification.", kw), - Subject: expr.Range().Ptr(), - }} - } - - // If we get down here then our expression isn't just a keyword, so we'll - // try to process it as a call instead. 
- call, diags := hcl.ExprCall(expr) - if diags.HasErrors() { - return cty.DynamicPseudoType, nil, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "A type specification is either a primitive type keyword (bool, number, string) or a complex type constructor call, like list(string).", - Subject: expr.Range().Ptr(), - }} - } - - switch call.Name { - case "bool", "string", "number": - return cty.DynamicPseudoType, nil, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("Primitive type keyword %q does not expect arguments.", call.Name), - Subject: &call.ArgsRange, - }} - case "any": - return cty.DynamicPseudoType, nil, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("Type constraint keyword %q does not expect arguments.", call.Name), - Subject: &call.ArgsRange, - }} - } - - if len(call.Arguments) != 1 { - contextRange := call.ArgsRange - subjectRange := call.ArgsRange - if len(call.Arguments) > 1 { - // If we have too many arguments (as opposed to too _few_) then - // we'll highlight the extraneous arguments as the diagnostic - // subject. - subjectRange = hcl.RangeBetween(call.Arguments[1].Range(), call.Arguments[len(call.Arguments)-1].Range()) - } - - switch call.Name { - case "list", "set", "map": - return cty.DynamicPseudoType, nil, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", call.Name), - Subject: &subjectRange, - Context: &contextRange, - }} - case "object": - return cty.DynamicPseudoType, nil, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", - Subject: &subjectRange, - Context: &contextRange, - }} - case "tuple": - return cty.DynamicPseudoType, nil, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The tuple type constructor requires one argument specifying the element types as a list.", - Subject: &subjectRange, - Context: &contextRange, - }} - } - } - - switch call.Name { - - case "list": - ety, defaults, diags := getType(call.Arguments[0], constraint, withDefaults) - ty := cty.List(ety) - return ty, collectionDefaults(ty, defaults), diags - case "set": - ety, defaults, diags := getType(call.Arguments[0], constraint, withDefaults) - ty := cty.Set(ety) - return ty, collectionDefaults(ty, defaults), diags - case "map": - ety, defaults, diags := getType(call.Arguments[0], constraint, withDefaults) - ty := cty.Map(ety) - return ty, collectionDefaults(ty, defaults), diags - case "object": - attrDefs, diags := hcl.ExprMap(call.Arguments[0]) - if diags.HasErrors() { - return cty.DynamicPseudoType, nil, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "Object type constructor requires a map whose keys are attribute names and whose values are the corresponding attribute types.", - Subject: call.Arguments[0].Range().Ptr(), - Context: expr.Range().Ptr(), - }} - } - - atys := make(map[string]cty.Type) - defaultValues := make(map[string]cty.Value) - children := make(map[string]*Defaults) - var optAttrs []string - for _, attrDef := range attrDefs { - attrName := hcl.ExprAsKeyword(attrDef.Key) - if attrName == "" { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: 
"Object constructor map keys must be attribute names.", - Subject: attrDef.Key.Range().Ptr(), - Context: expr.Range().Ptr(), - }) - continue - } - atyExpr := attrDef.Value - - // the attribute type expression might be wrapped in the special - // modifier optional(...) to indicate an optional attribute. If - // so, we'll unwrap that first and make a note about it being - // optional for when we construct the type below. - var defaultExpr hcl.Expression - if call, callDiags := hcl.ExprCall(atyExpr); !callDiags.HasErrors() { - if call.Name == "optional" { - if len(call.Arguments) < 1 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "Optional attribute modifier requires the attribute type as its argument.", - Subject: call.ArgsRange.Ptr(), - Context: atyExpr.Range().Ptr(), - }) - continue - } - if constraint { - if withDefaults { - switch len(call.Arguments) { - case 2: - defaultExpr = call.Arguments[1] - defaultVal, defaultDiags := defaultExpr.Value(nil) - diags = append(diags, defaultDiags...) - if !defaultDiags.HasErrors() { - optAttrs = append(optAttrs, attrName) - defaultValues[attrName] = defaultVal - } - case 1: - optAttrs = append(optAttrs, attrName) - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "Optional attribute modifier expects at most two arguments: the attribute type, and a default value.", - Subject: call.ArgsRange.Ptr(), - Context: atyExpr.Range().Ptr(), - }) - } - } else { - if len(call.Arguments) == 1 { - optAttrs = append(optAttrs, attrName) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "Optional attribute modifier expects only one argument: the attribute type.", - Subject: call.ArgsRange.Ptr(), - Context: atyExpr.Range().Ptr(), - }) - } - } - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "Optional attribute modifier is only for type constraints, not for exact types.", - Subject: call.NameRange.Ptr(), - Context: atyExpr.Range().Ptr(), - }) - } - atyExpr = call.Arguments[0] - } - } - - aty, aDefaults, attrDiags := getType(atyExpr, constraint, withDefaults) - diags = append(diags, attrDiags...) - - // If a default is set for an optional attribute, verify that it is - // convertible to the attribute type. 
- if defaultVal, ok := defaultValues[attrName]; ok { - convertedDefaultVal, err := convert.Convert(defaultVal, aty) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for optional attribute", - Detail: fmt.Sprintf("This default value is not compatible with the attribute's type constraint: %s.", err), - Subject: defaultExpr.Range().Ptr(), - }) - delete(defaultValues, attrName) - } else { - defaultValues[attrName] = convertedDefaultVal - } - } - - atys[attrName] = aty - if aDefaults != nil { - children[attrName] = aDefaults - } - } - ty := cty.ObjectWithOptionalAttrs(atys, optAttrs) - return ty, structuredDefaults(ty, defaultValues, children), diags - case "tuple": - elemDefs, diags := hcl.ExprList(call.Arguments[0]) - if diags.HasErrors() { - return cty.DynamicPseudoType, nil, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "Tuple type constructor requires a list of element types.", - Subject: call.Arguments[0].Range().Ptr(), - Context: expr.Range().Ptr(), - }} - } - etys := make([]cty.Type, len(elemDefs)) - children := make(map[string]*Defaults, len(elemDefs)) - for i, defExpr := range elemDefs { - ety, elemDefaults, elemDiags := getType(defExpr, constraint, withDefaults) - diags = append(diags, elemDiags...) - etys[i] = ety - if elemDefaults != nil { - children[fmt.Sprintf("%d", i)] = elemDefaults - } - } - ty := cty.Tuple(etys) - return ty, structuredDefaults(ty, nil, children), diags - case "optional": - return cty.DynamicPseudoType, nil, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("Keyword %q is valid only as a modifier for object type attributes.", call.Name), - Subject: call.NameRange.Ptr(), - }} - default: - // Can't access call.Arguments in this path because we've not validated - // that it contains exactly one expression here. - return cty.DynamicPseudoType, nil, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("Keyword %q is not a valid type constructor.", call.Name), - Subject: expr.Range().Ptr(), - }} - } -} - -func collectionDefaults(ty cty.Type, defaults *Defaults) *Defaults { - if defaults == nil { - return nil - } - return &Defaults{ - Type: ty, - Children: map[string]*Defaults{ - "": defaults, - }, - } -} - -func structuredDefaults(ty cty.Type, defaultValues map[string]cty.Value, children map[string]*Defaults) *Defaults { - if len(defaultValues) == 0 && len(children) == 0 { - return nil - } - - defaults := &Defaults{ - Type: ty, - } - if len(defaultValues) > 0 { - defaults.DefaultValues = defaultValues - } - if len(children) > 0 { - defaults.Children = children - } - - return defaults -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go deleted file mode 100644 index f2e187ef..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go +++ /dev/null @@ -1,139 +0,0 @@ -package typeexpr - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// Type attempts to process the given expression as a type expression and, if -// successful, returns the resulting type. If unsuccessful, error diagnostics -// are returned. 
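A quick caller-side sketch (illustrative only, not part of the vendored file; it assumes `hclsyntax` for parsing, which this vendor tree includes):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/typeexpr"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	// Parse a type expression from source text, then resolve it to a
	// concrete cty.Type via typeexpr.Type. The filename is invented.
	expr, diags := hclsyntax.ParseExpression([]byte(`map(number)`), "type.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	ty, diags := typeexpr.Type(expr)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(ty.FriendlyName()) // map of number
}
```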
-func Type(expr hcl.Expression) (cty.Type, hcl.Diagnostics) {
-	ty, _, diags := getType(expr, false, false)
-	return ty, diags
-}
-
-// TypeConstraint attempts to parse the given expression as a type constraint
-// and, if successful, returns the resulting type. If unsuccessful, error
-// diagnostics are returned.
-//
-// A type constraint has the same structure as a type, but it additionally
-// allows the keyword "any" to represent cty.DynamicPseudoType, which is often
-// used as a wildcard in type checking and type conversion operations.
-func TypeConstraint(expr hcl.Expression) (cty.Type, hcl.Diagnostics) {
-	ty, _, diags := getType(expr, true, false)
-	return ty, diags
-}
-
-// TypeConstraintWithDefaults attempts to parse the given expression as a type
-// constraint which may include default values for object attributes. If
-// successful both the resulting type and corresponding defaults are returned.
-// If unsuccessful, error diagnostics are returned.
-func TypeConstraintWithDefaults(expr hcl.Expression) (cty.Type, *Defaults, hcl.Diagnostics) {
-	return getType(expr, true, true)
-}
-
-// TypeString returns a string rendering of the given type as it would be
-// expected to appear in the HCL native syntax.
-//
-// This is primarily intended for showing types to the user in an application
-// that uses typeexpr, where the user can be assumed to be familiar with the
-// type expression syntax. In applications that do not use typeexpr these
-// results may be confusing to the user and so type.FriendlyName may be
-// preferable, even though it's less precise.
-//
-// TypeString produces reasonable results only for types like what would be
-// produced by the Type and TypeConstraint functions. In particular, it cannot
-// support capsule types.
-func TypeString(ty cty.Type) string {
-	// Easy cases first
-	switch ty {
-	case cty.String:
-		return "string"
-	case cty.Bool:
-		return "bool"
-	case cty.Number:
-		return "number"
-	case cty.DynamicPseudoType:
-		return "any"
-	}
-
-	if ty.IsCapsuleType() {
-		panic("TypeString does not support capsule types")
-	}
-
-	if ty.IsCollectionType() {
-		ety := ty.ElementType()
-		etyString := TypeString(ety)
-		switch {
-		case ty.IsListType():
-			return fmt.Sprintf("list(%s)", etyString)
-		case ty.IsSetType():
-			return fmt.Sprintf("set(%s)", etyString)
-		case ty.IsMapType():
-			return fmt.Sprintf("map(%s)", etyString)
-		default:
-			// Should never happen because the above is exhaustive
-			panic("unsupported collection type")
-		}
-	}
-
-	if ty.IsObjectType() {
-		var buf bytes.Buffer
-		buf.WriteString("object({")
-		atys := ty.AttributeTypes()
-		names := make([]string, 0, len(atys))
-		for name := range atys {
-			names = append(names, name)
-		}
-		sort.Strings(names)
-		first := true
-		for _, name := range names {
-			aty := atys[name]
-			if !first {
-				buf.WriteByte(',')
-			}
-			if !hclsyntax.ValidIdentifier(name) {
-				// Should never happen for any type produced by this package,
-				// but we'll do something reasonable here just so we don't
-				// produce garbage if someone gives us a hand-assembled object
-				// type that has weird attribute names.
-				// Using Go-style quoting here isn't perfect, since it doesn't
-				// exactly match HCL syntax, but it's fine for an edge-case.
- buf.WriteString(fmt.Sprintf("%q", name)) - } else { - buf.WriteString(name) - } - buf.WriteByte('=') - buf.WriteString(TypeString(aty)) - first = false - } - buf.WriteString("})") - return buf.String() - } - - if ty.IsTupleType() { - var buf bytes.Buffer - buf.WriteString("tuple([") - etys := ty.TupleElementTypes() - first := true - for _, ety := range etys { - if !first { - buf.WriteByte(',') - } - buf.WriteString(TypeString(ety)) - first = false - } - buf.WriteString("])") - return buf.String() - } - - // Should never happen because we covered all cases above. - panic(fmt.Errorf("unsupported type %#v", ty)) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/type_type.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/type_type.go deleted file mode 100644 index 5462d82c..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/type_type.go +++ /dev/null @@ -1,118 +0,0 @@ -package typeexpr - -import ( - "fmt" - "reflect" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/customdecode" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/function" -) - -// TypeConstraintType is a cty capsule type that allows cty type constraints to -// be used as values. -// -// If TypeConstraintType is used in a context supporting the -// customdecode.CustomExpressionDecoder extension then it will implement -// expression decoding using the TypeConstraint function, thus allowing -// type expressions to be used in contexts where value expressions might -// normally be expected, such as in arguments to function calls. -var TypeConstraintType cty.Type - -// TypeConstraintVal constructs a cty.Value whose type is -// TypeConstraintType. -func TypeConstraintVal(ty cty.Type) cty.Value { - return cty.CapsuleVal(TypeConstraintType, &ty) -} - -// TypeConstraintFromVal extracts the type from a cty.Value of -// TypeConstraintType that was previously constructed using TypeConstraintVal. -// -// If the given value isn't a known, non-null value of TypeConstraintType -// then this function will panic. -func TypeConstraintFromVal(v cty.Value) cty.Type { - if !v.Type().Equals(TypeConstraintType) { - panic("value is not of TypeConstraintType") - } - ptr := v.EncapsulatedValue().(*cty.Type) - return *ptr -} - -// ConvertFunc is a cty function that implements type conversions. -// -// Its signature is as follows: -// convert(value, type_constraint) -// -// ...where type_constraint is a type constraint expression as defined by -// typeexpr.TypeConstraint. -// -// It relies on HCL's customdecode extension and so it's not suitable for use -// in non-HCL contexts or if you are using a HCL syntax implementation that -// does not support customdecode for function arguments. However, it _is_ -// supported for function calls in the HCL native expression syntax. 
-var ConvertFunc function.Function - -func init() { - TypeConstraintType = cty.CapsuleWithOps("type constraint", reflect.TypeOf(cty.Type{}), &cty.CapsuleOps{ - ExtensionData: func(key interface{}) interface{} { - switch key { - case customdecode.CustomExpressionDecoder: - return customdecode.CustomExpressionDecoderFunc( - func(expr hcl.Expression, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - ty, diags := TypeConstraint(expr) - if diags.HasErrors() { - return cty.NilVal, diags - } - return TypeConstraintVal(ty), nil - }, - ) - default: - return nil - } - }, - TypeGoString: func(_ reflect.Type) string { - return "typeexpr.TypeConstraintType" - }, - GoString: func(raw interface{}) string { - tyPtr := raw.(*cty.Type) - return fmt.Sprintf("typeexpr.TypeConstraintVal(%#v)", *tyPtr) - }, - RawEquals: func(a, b interface{}) bool { - aPtr := a.(*cty.Type) - bPtr := b.(*cty.Type) - return (*aPtr).Equals(*bPtr) - }, - }) - - ConvertFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "value", - Type: cty.DynamicPseudoType, - AllowNull: true, - AllowDynamicType: true, - }, - { - Name: "type", - Type: TypeConstraintType, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - wantTypePtr := args[1].EncapsulatedValue().(*cty.Type) - got, err := convert.Convert(args[0], *wantTypePtr) - if err != nil { - return cty.NilType, function.NewArgError(0, err) - } - return got.Type(), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - v, err := convert.Convert(args[0], retType) - if err != nil { - return cty.NilVal, function.NewArgError(0, err) - } - return v, nil - }, - }) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go deleted file mode 100644 index 2954f4ce..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go +++ /dev/null @@ -1,320 +0,0 @@ -package gohcl - -import ( - "fmt" - "reflect" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/gocty" -) - -// DecodeBody extracts the configuration within the given body into the given -// value. This value must be a non-nil pointer to either a struct or -// a map, where in the former case the configuration will be decoded using -// struct tags and in the latter case only attributes are allowed and their -// values are decoded into the map. -// -// The given EvalContext is used to resolve any variables or functions in -// expressions encountered while decoding. This may be nil to require only -// constant values, for simple applications that do not support variables or -// functions. -// -// The returned diagnostics should be inspected with its HasErrors method to -// determine if the populated value is valid and complete. If error diagnostics -// are returned then the given value may have been partially-populated but -// may still be accessed by a careful caller for static analysis and editor -// integration use-cases. 
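Before the implementation, a caller-side sketch of the decoding flow just described (illustrative only, not part of the vendored file; the `ServiceConfig` struct, field names, and file name are invented):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

// ServiceConfig is a hypothetical target struct using the tag scheme
// described in this package's documentation.
type ServiceConfig struct {
	Name     string `hcl:"name"`
	Replicas int    `hcl:"replicas,optional"`
}

func main() {
	src := []byte("name = \"web\"\nreplicas = 3\n")
	f, diags := hclsyntax.ParseConfig(src, "config.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	var cfg ServiceConfig
	// A nil EvalContext permits only constant expressions, per the doc
	// comment above.
	diags = gohcl.DecodeBody(f.Body, nil, &cfg)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Printf("%+v\n", cfg) // {Name:web Replicas:3}
}
```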
-func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
-	rv := reflect.ValueOf(val)
-	if rv.Kind() != reflect.Ptr {
-		panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
-	}
-
-	return decodeBodyToValue(body, ctx, rv.Elem())
-}
-
-func decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
-	et := val.Type()
-	switch et.Kind() {
-	case reflect.Struct:
-		return decodeBodyToStruct(body, ctx, val)
-	case reflect.Map:
-		return decodeBodyToMap(body, ctx, val)
-	default:
-		panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
-	}
-}
-
-func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
-	schema, partial := ImpliedBodySchema(val.Interface())
-
-	var content *hcl.BodyContent
-	var leftovers hcl.Body
-	var diags hcl.Diagnostics
-	if partial {
-		content, leftovers, diags = body.PartialContent(schema)
-	} else {
-		content, diags = body.Content(schema)
-	}
-	if content == nil {
-		return diags
-	}
-
-	tags := getFieldTags(val.Type())
-
-	if tags.Body != nil {
-		fieldIdx := *tags.Body
-		field := val.Type().Field(fieldIdx)
-		fieldV := val.Field(fieldIdx)
-		switch {
-		case bodyType.AssignableTo(field.Type):
-			fieldV.Set(reflect.ValueOf(body))
-
-		default:
-			diags = append(diags, decodeBodyToValue(body, ctx, fieldV)...)
-		}
-	}
-
-	if tags.Remain != nil {
-		fieldIdx := *tags.Remain
-		field := val.Type().Field(fieldIdx)
-		fieldV := val.Field(fieldIdx)
-		switch {
-		case bodyType.AssignableTo(field.Type):
-			fieldV.Set(reflect.ValueOf(leftovers))
-		case attrsType.AssignableTo(field.Type):
-			attrs, attrsDiags := leftovers.JustAttributes()
-			if len(attrsDiags) > 0 {
-				diags = append(diags, attrsDiags...)
-			}
-			fieldV.Set(reflect.ValueOf(attrs))
-		default:
-			diags = append(diags, decodeBodyToValue(leftovers, ctx, fieldV)...)
-		}
-	}
-
-	for name, fieldIdx := range tags.Attributes {
-		attr := content.Attributes[name]
-		field := val.Type().Field(fieldIdx)
-		fieldV := val.Field(fieldIdx)
-
-		if attr == nil {
-			if !exprType.AssignableTo(field.Type) {
-				continue
-			}
-
-			// As a special case, if the target is of type hcl.Expression then
-			// we'll assign an actual expression that evaluates to a cty null,
-			// so the caller can deal with it within the cty realm rather
-			// than within the Go realm.
-			synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
-			fieldV.Set(reflect.ValueOf(synthExpr))
-			continue
-		}
-
-		switch {
-		case attrType.AssignableTo(field.Type):
-			fieldV.Set(reflect.ValueOf(attr))
-		case exprType.AssignableTo(field.Type):
-			fieldV.Set(reflect.ValueOf(attr.Expr))
-		default:
-			diags = append(diags, DecodeExpression(
-				attr.Expr, ctx, fieldV.Addr().Interface(),
-			)...)
-		}
-	}
-
-	blocksByType := content.Blocks.ByType()
-
-	for typeName, fieldIdx := range tags.Blocks {
-		blocks := blocksByType[typeName]
-		field := val.Type().Field(fieldIdx)
-
-		ty := field.Type
-		isSlice := false
-		isPtr := false
-		if ty.Kind() == reflect.Slice {
-			isSlice = true
-			ty = ty.Elem()
-		}
-		if ty.Kind() == reflect.Ptr {
-			isPtr = true
-			ty = ty.Elem()
-		}
-
-		if len(blocks) > 1 && !isSlice {
-			diags = append(diags, &hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  fmt.Sprintf("Duplicate %s block", typeName),
-				Detail: fmt.Sprintf(
-					"Only one %s block is allowed. 
Another was defined at %s.", - typeName, blocks[0].DefRange.String(), - ), - Subject: &blocks[1].DefRange, - }) - continue - } - - if len(blocks) == 0 { - if isSlice || isPtr { - if val.Field(fieldIdx).IsNil() { - val.Field(fieldIdx).Set(reflect.Zero(field.Type)) - } - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Missing %s block", typeName), - Detail: fmt.Sprintf("A %s block is required.", typeName), - Subject: body.MissingItemRange().Ptr(), - }) - } - continue - } - - switch { - - case isSlice: - elemType := ty - if isPtr { - elemType = reflect.PtrTo(ty) - } - sli := val.Field(fieldIdx) - if sli.IsNil() { - sli = reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks)) - } - - for i, block := range blocks { - if isPtr { - if i >= sli.Len() { - sli = reflect.Append(sli, reflect.New(ty)) - } - v := sli.Index(i) - if v.IsNil() { - v = reflect.New(ty) - } - diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) - sli.Index(i).Set(v) - } else { - if i >= sli.Len() { - sli = reflect.Append(sli, reflect.Indirect(reflect.New(ty))) - } - diags = append(diags, decodeBlockToValue(block, ctx, sli.Index(i))...) - } - } - - if sli.Len() > len(blocks) { - sli.SetLen(len(blocks)) - } - - val.Field(fieldIdx).Set(sli) - - default: - block := blocks[0] - if isPtr { - v := val.Field(fieldIdx) - if v.IsNil() { - v = reflect.New(ty) - } - diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) - val.Field(fieldIdx).Set(v) - } else { - diags = append(diags, decodeBlockToValue(block, ctx, val.Field(fieldIdx))...) - } - - } - - } - - return diags -} - -func decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { - attrs, diags := body.JustAttributes() - if attrs == nil { - return diags - } - - mv := reflect.MakeMap(v.Type()) - - for k, attr := range attrs { - switch { - case attrType.AssignableTo(v.Type().Elem()): - mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr)) - case exprType.AssignableTo(v.Type().Elem()): - mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr)) - default: - ev := reflect.New(v.Type().Elem()) - diags = append(diags, DecodeExpression(attr.Expr, ctx, ev.Interface())...) - mv.SetMapIndex(reflect.ValueOf(k), ev.Elem()) - } - } - - v.Set(mv) - - return diags -} - -func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { - diags := decodeBodyToValue(block.Body, ctx, v) - - if len(block.Labels) > 0 { - blockTags := getFieldTags(v.Type()) - for li, lv := range block.Labels { - lfieldIdx := blockTags.Labels[li].FieldIndex - v.Field(lfieldIdx).Set(reflect.ValueOf(lv)) - } - } - - return diags -} - -// DecodeExpression extracts the value of the given expression into the given -// value. This value must be something that gocty is able to decode into, -// since the final decoding is delegated to that package. -// -// The given EvalContext is used to resolve any variables or functions in -// expressions encountered while decoding. This may be nil to require only -// constant values, for simple applications that do not support variables or -// functions. -// -// The returned diagnostics should be inspected with its HasErrors method to -// determine if the populated value is valid and complete. If error diagnostics -// are returned then the given value may have been partially-populated but -// may still be accessed by a careful caller for static analysis and editor -// integration use-cases. 
-func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
-	srcVal, diags := expr.Value(ctx)
-
-	convTy, err := gocty.ImpliedType(val)
-	if err != nil {
-		panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err))
-	}
-
-	srcVal, err = convert.Convert(srcVal, convTy)
-	if err != nil {
-		diags = append(diags, &hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  "Unsuitable value type",
-			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
-			Subject:  expr.StartRange().Ptr(),
-			Context:  expr.Range().Ptr(),
-		})
-		return diags
-	}
-
-	err = gocty.FromCtyValue(srcVal, val)
-	if err != nil {
-		diags = append(diags, &hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  "Unsuitable value type",
-			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
-			Subject:  expr.StartRange().Ptr(),
-			Context:  expr.Range().Ptr(),
-		})
-	}
-
-	return diags
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go
deleted file mode 100644
index 9dcd970b..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Package gohcl allows decoding HCL configurations into Go data structures.
-//
-// It provides a convenient and concise way of describing the schema for
-// configuration and then accessing the resulting data via native Go
-// types.
-//
-// A struct field tag scheme is used, similar to other decoding and
-// unmarshalling libraries. The tags are formatted as in the following example:
-//
-//	ThingType string `hcl:"thing_type,attr"`
-//
-// Within each tag there are two comma-separated tokens. The first is the
-// name of the corresponding construct in configuration, while the second
-// is a keyword giving the kind of construct expected. The following
-// kind keywords are supported:
-//
-//	attr (the default) indicates that the value is to be populated from an attribute
-//	block indicates that the value is to be populated from a block
-//	label indicates that the value is to be populated from a block label
-//	optional is the same as attr, but the field is optional
-//	remain indicates that the value is to be populated from the remaining body after populating other fields
-//
-// "attr" fields may either be of type *hcl.Expression, in which case the raw
-// expression is assigned, or of any type accepted by gocty, in which case
-// gocty will be used to assign the value to a native Go type.
-//
-// "block" fields may be a struct that recursively uses the same tags, or a
-// slice of such structs, in which case multiple blocks of the corresponding
-// type are decoded into the slice.
-//
-// "body" can be placed on a single field of type hcl.Body to capture
-// the full hcl.Body that was decoded for a block. This does not allow leftover
-// values like "remain", so a decoding error will still be returned if leftover
-// fields are given. If you want to capture the decoding body PLUS leftover
-// fields, you must specify a "remain" field as well to prevent errors. The
-// body field and the remain field will both contain the leftover fields.
-//
-// "label" fields are considered only in a struct used as the type of a field
-// marked as "block", and are used sequentially to capture the labels of
-// the blocks being decoded. In this case, the name token is used only as
-// an identifier for the label in diagnostic messages.
-//
-// "optional" fields behave like "attr" fields, but they are optional
-// and will not give parsing errors if they are missing.
-// -// "remain" can be placed on a single field that may be either of type -// hcl.Body or hcl.Attributes, in which case any remaining body content is -// placed into this field for delayed processing. If no "remain" field is -// present then any attributes or blocks not matched by another valid tag -// will cause an error diagnostic. -// -// Only a subset of this tagging/typing vocabulary is supported for the -// "Encode" family of functions. See the EncodeIntoBody docs for full details -// on the constraints there. -// -// Broadly-speaking this package deals with two types of error. The first is -// errors in the configuration itself, which are returned as diagnostics -// written with the configuration author as the target audience. The second -// is bugs in the calling program, such as invalid struct tags, which are -// surfaced via panics since there can be no useful runtime handling of such -// errors and they should certainly not be returned to the user as diagnostics. -package gohcl diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go deleted file mode 100644 index d612e09c..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go +++ /dev/null @@ -1,191 +0,0 @@ -package gohcl - -import ( - "fmt" - "reflect" - "sort" - - "github.com/hashicorp/hcl/v2/hclwrite" - "github.com/zclconf/go-cty/cty/gocty" -) - -// EncodeIntoBody replaces the contents of the given hclwrite Body with -// attributes and blocks derived from the given value, which must be a -// struct value or a pointer to a struct value with the struct tags defined -// in this package. -// -// This function can work only with fully-decoded data. It will ignore any -// fields tagged as "remain", any fields that decode attributes into either -// hcl.Attribute or hcl.Expression values, and any fields that decode blocks -// into hcl.Attributes values. This function does not have enough information -// to complete the decoding of these types. -// -// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock -// to produce a whole hclwrite.Block including block labels. -// -// As long as a suitable value is given to encode and the destination body -// is non-nil, this function will always complete. It will panic in case of -// any errors in the calling program, such as passing an inappropriate type -// or a nil body. -// -// The layout of the resulting HCL source is derived from the ordering of -// the struct fields, with blank lines around nested blocks of different types. -// Fields representing attributes should usually precede those representing -// blocks so that the attributes can group togather in the result. For more -// control, use the hclwrite API directly. -func EncodeIntoBody(val interface{}, dst *hclwrite.Body) { - rv := reflect.ValueOf(val) - ty := rv.Type() - if ty.Kind() == reflect.Ptr { - rv = rv.Elem() - ty = rv.Type() - } - if ty.Kind() != reflect.Struct { - panic(fmt.Sprintf("value is %s, not struct", ty.Kind())) - } - - tags := getFieldTags(ty) - populateBody(rv, ty, tags, dst) -} - -// EncodeAsBlock creates a new hclwrite.Block populated with the data from -// the given value, which must be a struct or pointer to struct with the -// struct tags defined in this package. -// -// If the given struct type has fields tagged with "label" tags then they -// will be used in order to annotate the created block with labels. -// -// This function has the same constraints as EncodeIntoBody and will panic -// if they are violated. 
-func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block { - rv := reflect.ValueOf(val) - ty := rv.Type() - if ty.Kind() == reflect.Ptr { - rv = rv.Elem() - ty = rv.Type() - } - if ty.Kind() != reflect.Struct { - panic(fmt.Sprintf("value is %s, not struct", ty.Kind())) - } - - tags := getFieldTags(ty) - labels := make([]string, len(tags.Labels)) - for i, lf := range tags.Labels { - lv := rv.Field(lf.FieldIndex) - // We just stringify whatever we find. It should always be a string - // but if not then we'll still do something reasonable. - labels[i] = fmt.Sprintf("%s", lv.Interface()) - } - - block := hclwrite.NewBlock(blockType, labels) - populateBody(rv, ty, tags, block.Body()) - return block -} - -func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) { - nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks)) - namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks)) - for n, i := range tags.Attributes { - nameIdxs[n] = i - namesOrder = append(namesOrder, n) - } - for n, i := range tags.Blocks { - nameIdxs[n] = i - namesOrder = append(namesOrder, n) - } - sort.SliceStable(namesOrder, func(i, j int) bool { - ni, nj := namesOrder[i], namesOrder[j] - return nameIdxs[ni] < nameIdxs[nj] - }) - - dst.Clear() - - prevWasBlock := false - for _, name := range namesOrder { - fieldIdx := nameIdxs[name] - field := ty.Field(fieldIdx) - fieldTy := field.Type - fieldVal := rv.Field(fieldIdx) - - if fieldTy.Kind() == reflect.Ptr { - fieldTy = fieldTy.Elem() - fieldVal = fieldVal.Elem() - } - - if _, isAttr := tags.Attributes[name]; isAttr { - - if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) { - continue // ignore undecoded fields - } - if !fieldVal.IsValid() { - continue // ignore (field value is nil pointer) - } - if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() { - continue // ignore - } - if prevWasBlock { - dst.AppendNewline() - prevWasBlock = false - } - - valTy, err := gocty.ImpliedType(fieldVal.Interface()) - if err != nil { - panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err)) - } - - val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy) - if err != nil { - // This should never happen, since we should always be able - // to decode into the implied type. 
- panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err)) - } - - dst.SetAttributeValue(name, val) - - } else { // must be a block, then - elemTy := fieldTy - isSeq := false - if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array { - isSeq = true - elemTy = elemTy.Elem() - } - - if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) { - continue // ignore undecoded fields - } - prevWasBlock = false - - if isSeq { - l := fieldVal.Len() - for i := 0; i < l; i++ { - elemVal := fieldVal.Index(i) - if !elemVal.IsValid() { - continue // ignore (elem value is nil pointer) - } - if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() { - continue // ignore - } - block := EncodeAsBlock(elemVal.Interface(), name) - if !prevWasBlock { - dst.AppendNewline() - prevWasBlock = true - } - dst.AppendBlock(block) - } - } else { - if !fieldVal.IsValid() { - continue // ignore (field value is nil pointer) - } - if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() { - continue // ignore - } - block := EncodeAsBlock(fieldVal.Interface(), name) - if !prevWasBlock { - dst.AppendNewline() - prevWasBlock = true - } - dst.AppendBlock(block) - } - } - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go deleted file mode 100644 index df21cc49..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go +++ /dev/null @@ -1,181 +0,0 @@ -package gohcl - -import ( - "fmt" - "reflect" - "sort" - "strings" - - "github.com/hashicorp/hcl/v2" -) - -// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the -// given value, which must be a struct value or a pointer to one. If an -// inappropriate value is passed, this function will panic. -// -// The second return argument indicates whether the given struct includes -// a "remain" field, and thus the returned schema is non-exhaustive. -// -// This uses the tags on the fields of the struct to discover how each -// field's value should be expressed within configuration. If an invalid -// mapping is attempted, this function will panic. -func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) { - ty := reflect.TypeOf(val) - - if ty.Kind() == reflect.Ptr { - ty = ty.Elem() - } - - if ty.Kind() != reflect.Struct { - panic(fmt.Sprintf("given value must be struct, not %T", val)) - } - - var attrSchemas []hcl.AttributeSchema - var blockSchemas []hcl.BlockHeaderSchema - - tags := getFieldTags(ty) - - attrNames := make([]string, 0, len(tags.Attributes)) - for n := range tags.Attributes { - attrNames = append(attrNames, n) - } - sort.Strings(attrNames) - for _, n := range attrNames { - idx := tags.Attributes[n] - optional := tags.Optional[n] - field := ty.Field(idx) - - var required bool - - switch { - case field.Type.AssignableTo(exprType): - // If we're decoding to hcl.Expression then absense can be - // indicated via a null value, so we don't specify that - // the field is required during decoding. 
- required = false - case field.Type.Kind() != reflect.Ptr && !optional: - required = true - default: - required = false - } - - attrSchemas = append(attrSchemas, hcl.AttributeSchema{ - Name: n, - Required: required, - }) - } - - blockNames := make([]string, 0, len(tags.Blocks)) - for n := range tags.Blocks { - blockNames = append(blockNames, n) - } - sort.Strings(blockNames) - for _, n := range blockNames { - idx := tags.Blocks[n] - field := ty.Field(idx) - fty := field.Type - if fty.Kind() == reflect.Slice { - fty = fty.Elem() - } - if fty.Kind() == reflect.Ptr { - fty = fty.Elem() - } - if fty.Kind() != reflect.Struct { - panic(fmt.Sprintf( - "hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name, - )) - } - ftags := getFieldTags(fty) - var labelNames []string - if len(ftags.Labels) > 0 { - labelNames = make([]string, len(ftags.Labels)) - for i, l := range ftags.Labels { - labelNames[i] = l.Name - } - } - - blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{ - Type: n, - LabelNames: labelNames, - }) - } - - partial = tags.Remain != nil - schema = &hcl.BodySchema{ - Attributes: attrSchemas, - Blocks: blockSchemas, - } - return schema, partial -} - -type fieldTags struct { - Attributes map[string]int - Blocks map[string]int - Labels []labelField - Remain *int - Body *int - Optional map[string]bool -} - -type labelField struct { - FieldIndex int - Name string -} - -func getFieldTags(ty reflect.Type) *fieldTags { - ret := &fieldTags{ - Attributes: map[string]int{}, - Blocks: map[string]int{}, - Optional: map[string]bool{}, - } - - ct := ty.NumField() - for i := 0; i < ct; i++ { - field := ty.Field(i) - tag := field.Tag.Get("hcl") - if tag == "" { - continue - } - - comma := strings.Index(tag, ",") - var name, kind string - if comma != -1 { - name = tag[:comma] - kind = tag[comma+1:] - } else { - name = tag - kind = "attr" - } - - switch kind { - case "attr": - ret.Attributes[name] = i - case "block": - ret.Blocks[name] = i - case "label": - ret.Labels = append(ret.Labels, labelField{ - FieldIndex: i, - Name: name, - }) - case "remain": - if ret.Remain != nil { - panic("only one 'remain' tag is permitted") - } - idx := i // copy, because this loop will continue assigning to i - ret.Remain = &idx - case "body": - if ret.Body != nil { - panic("only one 'body' tag is permitted") - } - idx := i // copy, because this loop will continue assigning to i - ret.Body = &idx - case "optional": - ret.Attributes[name] = i - ret.Optional[name] = true - default: - panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name)) - } - } - - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/types.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/types.go deleted file mode 100644 index a8d00f8f..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/types.go +++ /dev/null @@ -1,16 +0,0 @@ -package gohcl - -import ( - "reflect" - - "github.com/hashicorp/hcl/v2" -) - -var victimExpr hcl.Expression -var victimBody hcl.Body - -var exprType = reflect.TypeOf(&victimExpr).Elem() -var bodyType = reflect.TypeOf(&victimBody).Elem() -var blockType = reflect.TypeOf((*hcl.Block)(nil)) -var attrType = reflect.TypeOf((*hcl.Attribute)(nil)) -var attrsType = reflect.TypeOf(hcl.Attributes(nil)) diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go deleted file mode 100644 index 71de4519..00000000 --- 
a/vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go +++ /dev/null @@ -1,21 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" -) - -type blockLabel struct { - Value string - Range hcl.Range -} - -func labelsForBlock(block *hcl.Block) []blockLabel { - ret := make([]blockLabel, len(block.Labels)) - for i := range block.Labels { - ret[i] = blockLabel{ - Value: block.Labels[i], - Range: block.LabelRanges[i], - } - } - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go deleted file mode 100644 index c6e42236..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go +++ /dev/null @@ -1,36 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -func decode(body hcl.Body, blockLabels []blockLabel, ctx *hcl.EvalContext, spec Spec, partial bool) (cty.Value, hcl.Body, hcl.Diagnostics) { - schema := ImpliedSchema(spec) - - var content *hcl.BodyContent - var diags hcl.Diagnostics - var leftovers hcl.Body - - if partial { - content, leftovers, diags = body.PartialContent(schema) - } else { - content, diags = body.Content(schema) - } - - val, valDiags := spec.decode(content, blockLabels, ctx) - diags = append(diags, valDiags...) - - return val, leftovers, diags -} - -func impliedType(spec Spec) cty.Type { - return spec.impliedType() -} - -func sourceRange(body hcl.Body, blockLabels []blockLabel, spec Spec) hcl.Range { - schema := ImpliedSchema(spec) - content, _, _ := body.PartialContent(schema) - - return spec.sourceRange(content, blockLabels) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go deleted file mode 100644 index 23bfe542..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package hcldec provides a higher-level API for unpacking the content of -// HCL bodies, implemented in terms of the low-level "Content" API exposed -// by the bodies themselves. -// -// It allows decoding an entire nested configuration in a single operation -// by providing a description of the intended structure. -// -// For some applications it may be more convenient to use the "gohcl" -// package, which has a similar purpose but decodes directly into native -// Go data types. hcldec instead targets the cty type system, and thus allows -// a cty-driven application to remain within that type system. -package hcldec diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go deleted file mode 100644 index e2027cfd..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go +++ /dev/null @@ -1,23 +0,0 @@ -package hcldec - -import ( - "encoding/gob" -) - -func init() { - // Every Spec implementation should be registered with gob, so that - // specs can be sent over gob channels, such as using - // github.com/hashicorp/go-plugin with plugins that need to describe - // what shape of configuration they are expecting. 
- gob.Register(ObjectSpec(nil)) - gob.Register(TupleSpec(nil)) - gob.Register((*AttrSpec)(nil)) - gob.Register((*LiteralSpec)(nil)) - gob.Register((*ExprSpec)(nil)) - gob.Register((*BlockSpec)(nil)) - gob.Register((*BlockListSpec)(nil)) - gob.Register((*BlockSetSpec)(nil)) - gob.Register((*BlockMapSpec)(nil)) - gob.Register((*BlockLabelSpec)(nil)) - gob.Register((*DefaultSpec)(nil)) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/public.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/public.go deleted file mode 100644 index 1fa548d0..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/public.go +++ /dev/null @@ -1,81 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// Decode interprets the given body using the given specification and returns -// the resulting value. If the given body is not valid per the spec, error -// diagnostics are returned and the returned value is likely to be incomplete. -// -// The ctx argument may be nil, in which case any references to variables or -// functions will produce error diagnostics. -func Decode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - val, _, diags := decode(body, nil, ctx, spec, false) - return val, diags -} - -// PartialDecode is like Decode except that it permits "leftover" items in -// the top-level body, which are returned as a new body to allow for -// further processing. -// -// Any descendent block bodies are _not_ decoded partially and thus must -// be fully described by the given specification. -func PartialDecode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Body, hcl.Diagnostics) { - return decode(body, nil, ctx, spec, true) -} - -// ImpliedType returns the value type that should result from decoding the -// given spec. -func ImpliedType(spec Spec) cty.Type { - return impliedType(spec) -} - -// SourceRange interprets the given body using the given specification and -// then returns the source range of the value that would be used to -// fulfill the spec. -// -// This can be used if application-level validation detects value errors, to -// obtain a reasonable SourceRange to use for generated diagnostics. It works -// best when applied to specific body items (e.g. using AttrSpec, BlockSpec, ...) -// as opposed to entire bodies using ObjectSpec, TupleSpec. The result will -// be less useful the broader the specification, so e.g. a spec that returns -// the entirety of all of the blocks of a given type is likely to be -// _particularly_ arbitrary and useless. -// -// If the given body is not valid per the given spec, the result is best-effort -// and may not actually be something ideal. It's expected that an application -// will already have used Decode or PartialDecode earlier and thus had an -// opportunity to detect and report spec violations. -func SourceRange(body hcl.Body, spec Spec) hcl.Range { - return sourceRange(body, nil, spec) -} - -// ChildBlockTypes returns a map of all of the child block types declared -// by the given spec, with block type names as keys and the associated -// nested body specs as values. -func ChildBlockTypes(spec Spec) map[string]Spec { - ret := map[string]Spec{} - - // visitSameBodyChildren walks through the spec structure, calling - // the given callback for each descendent spec encountered. We are - // interested in the specs that reference attributes and blocks. 
- var visit visitFunc - visit = func(s Spec) { - if bs, ok := s.(blockSpec); ok { - for _, blockS := range bs.blockHeaderSchemata() { - nested := bs.nestedSpec() - if nested != nil { // nil can be returned to dynamically opt out of this interface - ret[blockS.Type] = nested - } - } - } - - s.visitSameBodyChildren(visit) - } - - visit(spec) - - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go deleted file mode 100644 index ddbe7fa4..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go +++ /dev/null @@ -1,36 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" -) - -// ImpliedSchema returns the *hcl.BodySchema implied by the given specification. -// This is the schema that the Decode function will use internally to -// access the content of a given body. -func ImpliedSchema(spec Spec) *hcl.BodySchema { - var attrs []hcl.AttributeSchema - var blocks []hcl.BlockHeaderSchema - - // visitSameBodyChildren walks through the spec structure, calling - // the given callback for each descendent spec encountered. We are - // interested in the specs that reference attributes and blocks. - var visit visitFunc - visit = func(s Spec) { - if as, ok := s.(attrSpec); ok { - attrs = append(attrs, as.attrSchemata()...) - } - - if bs, ok := s.(blockSpec); ok { - blocks = append(blocks, bs.blockHeaderSchemata()...) - } - - s.visitSameBodyChildren(visit) - } - - visit(spec) - - return &hcl.BodySchema{ - Attributes: attrs, - Blocks: blocks, - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go deleted file mode 100644 index 42cb070d..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go +++ /dev/null @@ -1,1675 +0,0 @@ -package hcldec - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/customdecode" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/function" -) - -// A Spec is a description of how to decode a hcl.Body to a cty.Value. -// -// The various other types in this package whose names end in "Spec" are -// the spec implementations. The most common top-level spec is ObjectSpec, -// which decodes body content into a cty.Value of an object type. -type Spec interface { - // Perform the decode operation on the given body, in the context of - // the given block (which might be null), using the given eval context. - // - // "block" is provided only by the nested calls performed by the spec - // types that work on block bodies. - decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) - - // Return the cty.Type that should be returned when decoding a body with - // this spec. - impliedType() cty.Type - - // Call the given callback once for each of the nested specs that would - // get decoded with the same body and block as the receiver. This should - // not descend into the nested specs used when decoding blocks. - visitSameBodyChildren(cb visitFunc) - - // Determine the source range of the value that would be returned for the - // spec in the given content, in the context of the given block - // (which might be null). If the corresponding item is missing, return - // a place where it might be inserted. 
- sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range -} - -type visitFunc func(spec Spec) - -// An ObjectSpec is a Spec that produces a cty.Value of an object type whose -// attributes correspond to the keys of the spec map. -type ObjectSpec map[string]Spec - -// attrSpec is implemented by specs that require attributes from the body. -type attrSpec interface { - attrSchemata() []hcl.AttributeSchema -} - -// blockSpec is implemented by specs that require blocks from the body. -type blockSpec interface { - blockHeaderSchemata() []hcl.BlockHeaderSchema - nestedSpec() Spec -} - -// specNeedingVariables is implemented by specs that can use variables -// from the EvalContext, to declare which variables they need. -type specNeedingVariables interface { - variablesNeeded(content *hcl.BodyContent) []hcl.Traversal -} - -// UnknownBody can be optionally implemented by an hcl.Body instance which may -// be entirely unknown. -type UnknownBody interface { - Unknown() bool -} - -func (s ObjectSpec) visitSameBodyChildren(cb visitFunc) { - for _, c := range s { - cb(c) - } -} - -func (s ObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - vals := make(map[string]cty.Value, len(s)) - var diags hcl.Diagnostics - - for k, spec := range s { - var kd hcl.Diagnostics - vals[k], kd = spec.decode(content, blockLabels, ctx) - diags = append(diags, kd...) - } - - return cty.ObjectVal(vals), diags -} - -func (s ObjectSpec) impliedType() cty.Type { - if len(s) == 0 { - return cty.EmptyObject - } - - attrTypes := make(map[string]cty.Type) - for k, childSpec := range s { - attrTypes[k] = childSpec.impliedType() - } - return cty.Object(attrTypes) -} - -func (s ObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // This is not great, but the best we can do. In practice, it's rather - // strange to ask for the source range of an entire top-level body, since - // that's already readily available to the caller. - return content.MissingItemRange -} - -// A TupleSpec is a Spec that produces a cty.Value of a tuple type whose -// elements correspond to the elements of the spec slice. -type TupleSpec []Spec - -func (s TupleSpec) visitSameBodyChildren(cb visitFunc) { - for _, c := range s { - cb(c) - } -} - -func (s TupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - vals := make([]cty.Value, len(s)) - var diags hcl.Diagnostics - - for i, spec := range s { - var ed hcl.Diagnostics - vals[i], ed = spec.decode(content, blockLabels, ctx) - diags = append(diags, ed...) - } - - return cty.TupleVal(vals), diags -} - -func (s TupleSpec) impliedType() cty.Type { - if len(s) == 0 { - return cty.EmptyTuple - } - - attrTypes := make([]cty.Type, len(s)) - for i, childSpec := range s { - attrTypes[i] = childSpec.impliedType() - } - return cty.Tuple(attrTypes) -} - -func (s TupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // This is not great, but the best we can do. In practice, it's rather - // strange to ask for the source range of an entire top-level body, since - // that's already readily available to the caller. - return content.MissingItemRange -} - -// An AttrSpec is a Spec that evaluates a particular attribute expression in -// the body and returns its resulting value converted to the requested type, -// or produces a diagnostic if the type is incorrect. 
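A sketch of the conversion behavior described above, using the AttrSpec declared just below. The file name and the "port" attribute are hypothetical; note how a number in the source is converted to the requested cty.String rather than rejected.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// "port" is written as a number, but the spec requests cty.String,
	// so the decoder converts the value instead of reporting an error.
	src := []byte("port = 8080\n")
	f, diags := hclsyntax.ParseConfig(src, "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	spec := &hcldec.AttrSpec{Name: "port", Type: cty.String, Required: true}
	val, moreDiags := hcldec.Decode(f.Body, spec, nil)
	if moreDiags.HasErrors() {
		panic(moreDiags.Error())
	}
	fmt.Println(val.AsString()) // "8080"
}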
-type AttrSpec struct { - Name string - Type cty.Type - Required bool -} - -func (s *AttrSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -// specNeedingVariables implementation -func (s *AttrSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - attr, exists := content.Attributes[s.Name] - if !exists { - return nil - } - - return attr.Expr.Variables() -} - -// attrSpec implementation -func (s *AttrSpec) attrSchemata() []hcl.AttributeSchema { - return []hcl.AttributeSchema{ - { - Name: s.Name, - Required: s.Required, - }, - } -} - -func (s *AttrSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - attr, exists := content.Attributes[s.Name] - if !exists { - return content.MissingItemRange - } - - return attr.Expr.Range() -} - -func (s *AttrSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - attr, exists := content.Attributes[s.Name] - if !exists { - // We don't need to check required and emit a diagnostic here, because - // that would already have happened when building "content". - return cty.NullVal(s.Type), nil - } - - if decodeFn := customdecode.CustomExpressionDecoderForType(s.Type); decodeFn != nil { - v, diags := decodeFn(attr.Expr, ctx) - if v == cty.NilVal { - v = cty.UnknownVal(s.Type) - } - return v, diags - } - - val, diags := attr.Expr.Value(ctx) - - convVal, err := convert.Convert(val, s.Type) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect attribute value type", - Detail: fmt.Sprintf( - "Inappropriate value for attribute %q: %s.", - s.Name, err.Error(), - ), - Subject: attr.Expr.Range().Ptr(), - Context: hcl.RangeBetween(attr.NameRange, attr.Expr.Range()).Ptr(), - Expression: attr.Expr, - EvalContext: ctx, - }) - // We'll return an unknown value of the _correct_ type so that the - // incomplete result can still be used for some analysis use-cases. - val = cty.UnknownVal(s.Type) - } else { - val = convVal - } - - return val, diags -} - -func (s *AttrSpec) impliedType() cty.Type { - return s.Type -} - -// A LiteralSpec is a Spec that produces the given literal value, ignoring -// the given body. -type LiteralSpec struct { - Value cty.Value -} - -func (s *LiteralSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -func (s *LiteralSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - return s.Value, nil -} - -func (s *LiteralSpec) impliedType() cty.Type { - return s.Value.Type() -} - -func (s *LiteralSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // No sensible range to return for a literal, so the caller had better - // ensure it doesn't cause any diagnostics. - return hcl.Range{ - Filename: "", - } -} - -// An ExprSpec is a Spec that evaluates the given expression, ignoring the -// given body. 
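Both LiteralSpec above and ExprSpec below ignore the body they are given; they exist mainly to be composed with other specs. One plausible composition, sketched here with hypothetical names, pairs an AttrSpec with a LiteralSpec through the DefaultSpec defined later in this file, so that a missing attribute falls back to a constant.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// The body has no "region" attribute, so the DefaultSpec falls back
	// to the LiteralSpec's constant value.
	f, diags := hclsyntax.ParseConfig([]byte("name = \"web\"\n"), "in.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	spec := hcldec.ObjectSpec{
		"name": &hcldec.AttrSpec{Name: "name", Type: cty.String, Required: true},
		"region": &hcldec.DefaultSpec{
			Primary: &hcldec.AttrSpec{Name: "region", Type: cty.String},
			Default: &hcldec.LiteralSpec{Value: cty.StringVal("us-east-1")},
		},
	}
	val, moreDiags := hcldec.Decode(f.Body, spec, nil)
	if moreDiags.HasErrors() {
		panic(moreDiags.Error())
	}
	fmt.Println(val.GetAttr("region").AsString()) // "us-east-1"
}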
-type ExprSpec struct { - Expr hcl.Expression -} - -func (s *ExprSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -// specNeedingVariables implementation -func (s *ExprSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - return s.Expr.Variables() -} - -func (s *ExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - return s.Expr.Value(ctx) -} - -func (s *ExprSpec) impliedType() cty.Type { - // We can't know the type of our expression until we evaluate it - return cty.DynamicPseudoType -} - -func (s *ExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - return s.Expr.Range() -} - -// A BlockSpec is a Spec that produces a cty.Value by decoding the contents -// of a single nested block of a given type, using a nested spec. -// -// If the Required flag is not set, the nested block may be omitted, in which -// case a null value is produced. If it _is_ set, an error diagnostic is -// produced if there are no nested blocks of the given type. -type BlockSpec struct { - TypeName string - Nested Spec - Required bool -} - -func (s *BlockSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: findLabelSpecs(s.Nested), - }, - } -} - -// blockSpec implementation -func (s *BlockSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return nil - } - - return Variables(childBlock.Body, s.Nested) -} - -func (s *BlockSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - if childBlock != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "Only one block of type %q is allowed. Previous definition was at %s.", - s.TypeName, childBlock.DefRange.String(), - ), - Subject: &candidate.DefRange, - }) - break - } - - childBlock = candidate - } - - if childBlock == nil { - if s.Required { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Missing %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block of type %q is required here.", s.TypeName, - ), - Subject: &content.MissingItemRange, - }) - } - return cty.NullVal(s.Nested.impliedType()), diags - } - - if s.Nested == nil { - panic("BlockSpec with no Nested Spec") - } - val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) - diags = append(diags, childDiags...) 
- return val, diags -} - -func (s *BlockSpec) impliedType() cty.Type { - return s.Nested.impliedType() -} - -func (s *BlockSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockListSpec is a Spec that produces a cty list of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -type BlockListSpec struct { - TypeName string - Nested Spec - MinItems int - MaxItems int -} - -func (s *BlockListSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockListSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: findLabelSpecs(s.Nested), - }, - } -} - -// blockSpec implementation -func (s *BlockListSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockListSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockListSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockListSpec with no Nested Spec") - } - - var elems []cty.Value - var sourceRanges []hcl.Range - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) - diags = append(diags, childDiags...) - - if u, ok := childBlock.Body.(UnknownBody); ok { - if u.Unknown() { - // If any block Body is unknown, then the entire block value - // must be unknown - return cty.UnknownVal(s.impliedType()), diags - } - } - - elems = append(elems, val) - sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) - } - - if len(elems) < s.MinItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), - Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), - Subject: &content.MissingItemRange, - }) - } else if s.MaxItems > 0 && len(elems) > s.MaxItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), - Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), - Subject: &sourceRanges[s.MaxItems], - }) - } - - if len(elems) == 0 { - return cty.ListValEmpty(s.Nested.impliedType()), diags - } - - // Since our target is a list, all of the decoded elements must have the - // same type or cty.ListVal will panic below. Different types can arise - // if there is an attribute spec of type cty.DynamicPseudoType in the - // nested spec; all given values must be convertable to a single type - // in order for the result to be considered valid. 
-	etys := make([]cty.Type, len(elems))
-	for i, v := range elems {
-		etys[i] = v.Type()
-	}
-	ety, convs := convert.UnifyUnsafe(etys)
-	if ety == cty.NilType {
-		// FIXME: This is a pretty terrible error message.
-		diags = append(diags, &hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName),
-			Detail:   "Corresponding attributes in all blocks of this type must be the same.",
-			Subject:  &sourceRanges[0],
-		})
-		return cty.DynamicVal, diags
-	}
-	for i, v := range elems {
-		if convs[i] != nil {
-			newV, err := convs[i](v)
-			if err != nil {
-				// FIXME: This is a pretty terrible error message.
-				diags = append(diags, &hcl.Diagnostic{
-					Severity: hcl.DiagError,
-					Summary:  fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName),
-					Detail:   fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err),
-					Subject:  &sourceRanges[i],
-				})
-				// Bail early here so we won't panic below in cty.ListVal
-				return cty.DynamicVal, diags
-			}
-			elems[i] = newV
-		}
-	}
-
-	return cty.ListVal(elems), diags
-}
-
-func (s *BlockListSpec) impliedType() cty.Type {
-	return cty.List(s.Nested.impliedType())
-}
-
-func (s *BlockListSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
-	// We return the source range of the _first_ block of the given type,
-	// since they are not guaranteed to form a contiguous range.
-
-	var childBlock *hcl.Block
-	for _, candidate := range content.Blocks {
-		if candidate.Type != s.TypeName {
-			continue
-		}
-
-		childBlock = candidate
-		break
-	}
-
-	if childBlock == nil {
-		return content.MissingItemRange
-	}
-
-	return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
-}
-
-// A BlockTupleSpec is a Spec that produces a cty tuple of the results of
-// decoding all of the nested blocks of a given type, using a nested spec.
-//
-// This is similar to BlockListSpec, but it permits the nested blocks to have
-// different result types in situations where cty.DynamicPseudoType attributes
-// are present.
-type BlockTupleSpec struct {
-	TypeName string
-	Nested   Spec
-	MinItems int
-	MaxItems int
-}
-
-func (s *BlockTupleSpec) visitSameBodyChildren(cb visitFunc) {
-	// leaf node ("Nested" does not use the same body)
-}
-
-// blockSpec implementation
-func (s *BlockTupleSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
-	return []hcl.BlockHeaderSchema{
-		{
-			Type:       s.TypeName,
-			LabelNames: findLabelSpecs(s.Nested),
-		},
-	}
-}
-
-// blockSpec implementation
-func (s *BlockTupleSpec) nestedSpec() Spec {
-	return s.Nested
-}
-
-// specNeedingVariables implementation
-func (s *BlockTupleSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
-	var ret []hcl.Traversal
-
-	for _, childBlock := range content.Blocks {
-		if childBlock.Type != s.TypeName {
-			continue
-		}
-
-		ret = append(ret, Variables(childBlock.Body, s.Nested)...)
-	}
-
-	return ret
-}
-
-func (s *BlockTupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
-	var diags hcl.Diagnostics
-
-	if s.Nested == nil {
-		panic("BlockTupleSpec with no Nested Spec")
-	}
-
-	var elems []cty.Value
-	var sourceRanges []hcl.Range
-	for _, childBlock := range content.Blocks {
-		if childBlock.Type != s.TypeName {
-			continue
-		}
-
-		val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false)
-		diags = append(diags, childDiags...)
- - if u, ok := childBlock.Body.(UnknownBody); ok { - if u.Unknown() { - // If any block Body is unknown, then the entire block value - // must be unknown - return cty.UnknownVal(s.impliedType()), diags - } - } - - elems = append(elems, val) - sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) - } - - if len(elems) < s.MinItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), - Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), - Subject: &content.MissingItemRange, - }) - } else if s.MaxItems > 0 && len(elems) > s.MaxItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), - Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), - Subject: &sourceRanges[s.MaxItems], - }) - } - - if len(elems) == 0 { - return cty.EmptyTupleVal, diags - } - - return cty.TupleVal(elems), diags -} - -func (s *BlockTupleSpec) impliedType() cty.Type { - // We can't predict our type, because we don't know how many blocks - // there will be until we decode. - return cty.DynamicPseudoType -} - -func (s *BlockTupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockSetSpec is a Spec that produces a cty set of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -type BlockSetSpec struct { - TypeName string - Nested Spec - MinItems int - MaxItems int -} - -func (s *BlockSetSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockSetSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: findLabelSpecs(s.Nested), - }, - } -} - -// blockSpec implementation -func (s *BlockSetSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockSetSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockSetSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockSetSpec with no Nested Spec") - } - - var elems []cty.Value - var sourceRanges []hcl.Range - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) - diags = append(diags, childDiags...) 
-
-		if u, ok := childBlock.Body.(UnknownBody); ok {
-			if u.Unknown() {
-				// If any block Body is unknown, then the entire block value
-				// must be unknown
-				return cty.UnknownVal(s.impliedType()), diags
-			}
-		}
-
-		elems = append(elems, val)
-		sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested))
-	}
-
-	if len(elems) < s.MinItems {
-		diags = append(diags, &hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  fmt.Sprintf("Insufficient %s blocks", s.TypeName),
-			Detail:   fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName),
-			Subject:  &content.MissingItemRange,
-		})
-	} else if s.MaxItems > 0 && len(elems) > s.MaxItems {
-		diags = append(diags, &hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  fmt.Sprintf("Too many %s blocks", s.TypeName),
-			Detail:   fmt.Sprintf("No more than %d %q blocks are allowed.", s.MaxItems, s.TypeName),
-			Subject:  &sourceRanges[s.MaxItems],
-		})
-	}
-
-	if len(elems) == 0 {
-		return cty.SetValEmpty(s.Nested.impliedType()), diags
-	}
-
-	// Since our target is a set, all of the decoded elements must have the
-	// same type or cty.SetVal will panic below. Different types can arise
-	// if there is an attribute spec of type cty.DynamicPseudoType in the
-	// nested spec; all given values must be convertible to a single type
-	// in order for the result to be considered valid.
-	etys := make([]cty.Type, len(elems))
-	for i, v := range elems {
-		etys[i] = v.Type()
-	}
-	ety, convs := convert.UnifyUnsafe(etys)
-	if ety == cty.NilType {
-		// FIXME: This is a pretty terrible error message.
-		diags = append(diags, &hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName),
-			Detail:   "Corresponding attributes in all blocks of this type must be the same.",
-			Subject:  &sourceRanges[0],
-		})
-		return cty.DynamicVal, diags
-	}
-	for i, v := range elems {
-		if convs[i] != nil {
-			newV, err := convs[i](v)
-			if err != nil {
-				// FIXME: This is a pretty terrible error message.
-				diags = append(diags, &hcl.Diagnostic{
-					Severity: hcl.DiagError,
-					Summary:  fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName),
-					Detail:   fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err),
-					Subject:  &sourceRanges[i],
-				})
-				// Bail early here so we won't panic below in cty.SetVal
-				return cty.DynamicVal, diags
-			}
-			elems[i] = newV
-		}
-	}
-
-	return cty.SetVal(elems), diags
-}
-
-func (s *BlockSetSpec) impliedType() cty.Type {
-	return cty.Set(s.Nested.impliedType())
-}
-
-func (s *BlockSetSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
-	// We return the source range of the _first_ block of the given type,
-	// since they are not guaranteed to form a contiguous range.
-
-	var childBlock *hcl.Block
-	for _, candidate := range content.Blocks {
-		if candidate.Type != s.TypeName {
-			continue
-		}
-
-		childBlock = candidate
-		break
-	}
-
-	if childBlock == nil {
-		return content.MissingItemRange
-	}
-
-	return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
-}
-
-// A BlockMapSpec is a Spec that produces a cty map of the results of
-// decoding all of the nested blocks of a given type, using a nested spec.
-//
-// One level of map structure is created for each of the given label names.
-// There must be at least one given label name.
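A usage sketch for the BlockMapSpec defined just below, keying decoded block bodies by their labels. The "server" block type and "addr" attribute are hypothetical; the decode entry points are the ones in this file.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte(`
server "a" { addr = "10.0.0.1" }
server "b" { addr = "10.0.0.2" }
`)
	f, diags := hclsyntax.ParseConfig(src, "servers.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// One map level per label name, so the implied type here is
	// cty.Map(cty.Object(...)).
	spec := &hcldec.BlockMapSpec{
		TypeName:   "server",
		LabelNames: []string{"name"},
		Nested: hcldec.ObjectSpec{
			"addr": &hcldec.AttrSpec{Name: "addr", Type: cty.String, Required: true},
		},
	}
	val, moreDiags := hcldec.Decode(f.Body, spec, nil)
	if moreDiags.HasErrors() {
		panic(moreDiags.Error())
	}
	fmt.Println(val.Index(cty.StringVal("a")).GetAttr("addr").AsString()) // "10.0.0.1"
}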
-type BlockMapSpec struct { - TypeName string - LabelNames []string - Nested Spec -} - -func (s *BlockMapSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockMapSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...), - }, - } -} - -// blockSpec implementation -func (s *BlockMapSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockMapSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockMapSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockMapSpec with no Nested Spec") - } - if ImpliedType(s).HasDynamicTypes() { - panic("cty.DynamicPseudoType attributes may not be used inside a BlockMapSpec") - } - - elems := map[string]interface{}{} - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - if u, ok := childBlock.Body.(UnknownBody); ok { - if u.Unknown() { - // If any block Body is unknown, then the entire block value - // must be unknown - return cty.UnknownVal(s.impliedType()), diags - } - } - - childLabels := labelsForBlock(childBlock) - val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false) - targetMap := elems - for _, key := range childBlock.Labels[:len(s.LabelNames)-1] { - if _, exists := targetMap[key]; !exists { - targetMap[key] = make(map[string]interface{}) - } - targetMap = targetMap[key].(map[string]interface{}) - } - - diags = append(diags, childDiags...) - - key := childBlock.Labels[len(s.LabelNames)-1] - if _, exists := targetMap[key]; exists { - labelsBuf := bytes.Buffer{} - for _, label := range childBlock.Labels { - fmt.Fprintf(&labelsBuf, " %q", label) - } - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block for %s%s was already defined. The %s labels must be unique.", - s.TypeName, labelsBuf.String(), s.TypeName, - ), - Subject: &childBlock.DefRange, - }) - continue - } - - targetMap[key] = val - } - - if len(elems) == 0 { - return cty.MapValEmpty(s.Nested.impliedType()), diags - } - - var ctyMap func(map[string]interface{}, int) cty.Value - ctyMap = func(raw map[string]interface{}, depth int) cty.Value { - vals := make(map[string]cty.Value, len(raw)) - if depth == 1 { - for k, v := range raw { - vals[k] = v.(cty.Value) - } - } else { - for k, v := range raw { - vals[k] = ctyMap(v.(map[string]interface{}), depth-1) - } - } - return cty.MapVal(vals) - } - - return ctyMap(elems, len(s.LabelNames)), diags -} - -func (s *BlockMapSpec) impliedType() cty.Type { - ret := s.Nested.impliedType() - for _ = range s.LabelNames { - ret = cty.Map(ret) - } - return ret -} - -func (s *BlockMapSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. 
- - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockObjectSpec is a Spec that produces a cty object of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -// -// One level of object structure is created for each of the given label names. -// There must be at least one given label name. -// -// This is similar to BlockMapSpec, but it permits the nested blocks to have -// different result types in situations where cty.DynamicPseudoType attributes -// are present. -type BlockObjectSpec struct { - TypeName string - LabelNames []string - Nested Spec -} - -func (s *BlockObjectSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockObjectSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...), - }, - } -} - -// blockSpec implementation -func (s *BlockObjectSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockObjectSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockObjectSpec with no Nested Spec") - } - - elems := map[string]interface{}{} - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - if u, ok := childBlock.Body.(UnknownBody); ok { - if u.Unknown() { - // If any block Body is unknown, then the entire block value - // must be unknown - return cty.UnknownVal(s.impliedType()), diags - } - } - - childLabels := labelsForBlock(childBlock) - val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false) - targetMap := elems - for _, key := range childBlock.Labels[:len(s.LabelNames)-1] { - if _, exists := targetMap[key]; !exists { - targetMap[key] = make(map[string]interface{}) - } - targetMap = targetMap[key].(map[string]interface{}) - } - - diags = append(diags, childDiags...) - - key := childBlock.Labels[len(s.LabelNames)-1] - if _, exists := targetMap[key]; exists { - labelsBuf := bytes.Buffer{} - for _, label := range childBlock.Labels { - fmt.Fprintf(&labelsBuf, " %q", label) - } - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block for %s%s was already defined. 
The %s labels must be unique.", - s.TypeName, labelsBuf.String(), s.TypeName, - ), - Subject: &childBlock.DefRange, - }) - continue - } - - targetMap[key] = val - } - - if len(elems) == 0 { - return cty.EmptyObjectVal, diags - } - - var ctyObj func(map[string]interface{}, int) cty.Value - ctyObj = func(raw map[string]interface{}, depth int) cty.Value { - vals := make(map[string]cty.Value, len(raw)) - if depth == 1 { - for k, v := range raw { - vals[k] = v.(cty.Value) - } - } else { - for k, v := range raw { - vals[k] = ctyObj(v.(map[string]interface{}), depth-1) - } - } - return cty.ObjectVal(vals) - } - - return ctyObj(elems, len(s.LabelNames)), diags -} - -func (s *BlockObjectSpec) impliedType() cty.Type { - // We can't predict our type, since we don't know how many blocks are - // present and what labels they have until we decode. - return cty.DynamicPseudoType -} - -func (s *BlockObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockAttrsSpec is a Spec that interprets a single block as if it were -// a map of some element type. That is, each attribute within the block -// becomes a key in the resulting map and the attribute's value becomes the -// element value, after conversion to the given element type. The resulting -// value is a cty.Map of the given element type. -// -// This spec imposes a validation constraint that there be exactly one block -// of the given type name and that this block may contain only attributes. The -// block does not accept any labels. -// -// This is an alternative to an AttrSpec of a map type for situations where -// block syntax is desired. Note that block syntax does not permit dynamic -// keys, construction of the result via a "for" expression, etc. In most cases -// an AttrSpec is preferred if the desired result is a map whose keys are -// chosen by the user rather than by schema. -type BlockAttrsSpec struct { - TypeName string - ElementType cty.Type - Required bool -} - -func (s *BlockAttrsSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -// blockSpec implementation -func (s *BlockAttrsSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: nil, - }, - } -} - -// blockSpec implementation -func (s *BlockAttrsSpec) nestedSpec() Spec { - // This is an odd case: we aren't actually going to apply a nested spec - // in this case, since we're going to interpret the body directly as - // attributes, but we need to return something non-nil so that the - // decoder will recognize this as a block spec. We won't actually be - // using this for anything at decode time. - return noopSpec{} -} - -// specNeedingVariables implementation -func (s *BlockAttrsSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - - block, _ := s.findBlock(content) - if block == nil { - return nil - } - - var vars []hcl.Traversal - - attrs, diags := block.Body.JustAttributes() - if diags.HasErrors() { - return nil - } - - for _, attr := range attrs { - vars = append(vars, attr.Expr.Variables()...) 
- } - - // We'll return the variables references in source order so that any - // error messages that result are also in source order. - sort.Slice(vars, func(i, j int) bool { - return vars[i].SourceRange().Start.Byte < vars[j].SourceRange().Start.Byte - }) - - return vars -} - -func (s *BlockAttrsSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - block, other := s.findBlock(content) - if block == nil { - if s.Required { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Missing %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block of type %q is required here.", s.TypeName, - ), - Subject: &content.MissingItemRange, - }) - } - return cty.NullVal(cty.Map(s.ElementType)), diags - } - if other != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "Only one block of type %q is allowed. Previous definition was at %s.", - s.TypeName, block.DefRange.String(), - ), - Subject: &other.DefRange, - }) - } - - attrs, attrDiags := block.Body.JustAttributes() - diags = append(diags, attrDiags...) - - if len(attrs) == 0 { - return cty.MapValEmpty(s.ElementType), diags - } - - vals := make(map[string]cty.Value, len(attrs)) - for name, attr := range attrs { - if decodeFn := customdecode.CustomExpressionDecoderForType(s.ElementType); decodeFn != nil { - attrVal, attrDiags := decodeFn(attr.Expr, ctx) - diags = append(diags, attrDiags...) - if attrVal == cty.NilVal { - attrVal = cty.UnknownVal(s.ElementType) - } - vals[name] = attrVal - continue - } - - attrVal, attrDiags := attr.Expr.Value(ctx) - diags = append(diags, attrDiags...) - - attrVal, err := convert.Convert(attrVal, s.ElementType) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid attribute value", - Detail: fmt.Sprintf("Invalid value for attribute of %q block: %s.", s.TypeName, err), - Subject: attr.Expr.Range().Ptr(), - Context: hcl.RangeBetween(attr.NameRange, attr.Expr.Range()).Ptr(), - Expression: attr.Expr, - EvalContext: ctx, - }) - attrVal = cty.UnknownVal(s.ElementType) - } - - vals[name] = attrVal - } - - return cty.MapVal(vals), diags -} - -func (s *BlockAttrsSpec) impliedType() cty.Type { - return cty.Map(s.ElementType) -} - -func (s *BlockAttrsSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - block, _ := s.findBlock(content) - if block == nil { - return content.MissingItemRange - } - return block.DefRange -} - -func (s *BlockAttrsSpec) findBlock(content *hcl.BodyContent) (block *hcl.Block, other *hcl.Block) { - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - if block != nil { - return block, candidate - } - block = candidate - } - - return block, nil -} - -// A BlockLabelSpec is a Spec that returns a cty.String representing the -// label of the block its given body belongs to, if indeed its given body -// belongs to a block. It is a programming error to use this in a non-block -// context, so this spec will panic in that case. -// -// This spec only works in the nested spec within a BlockSpec, BlockListSpec, -// BlockSetSpec or BlockMapSpec. -// -// The full set of label specs used against a particular block must have a -// consecutive set of indices starting at zero. 
The maximum index found
-// defines how many labels the corresponding blocks must have in HCL source.
-type BlockLabelSpec struct {
-	Index int
-	Name  string
-}
-
-func (s *BlockLabelSpec) visitSameBodyChildren(cb visitFunc) {
-	// leaf node
-}
-
-func (s *BlockLabelSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
-	if s.Index >= len(blockLabels) {
-		panic("BlockLabelSpec used in non-block context")
-	}
-
-	return cty.StringVal(blockLabels[s.Index].Value), nil
-}
-
-func (s *BlockLabelSpec) impliedType() cty.Type {
-	return cty.String // labels are always strings
-}
-
-func (s *BlockLabelSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
-	if s.Index >= len(blockLabels) {
-		panic("BlockLabelSpec used in non-block context")
-	}
-
-	return blockLabels[s.Index].Range
-}
-
-func findLabelSpecs(spec Spec) []string {
-	maxIdx := -1
-	var names map[int]string
-
-	var visit visitFunc
-	visit = func(s Spec) {
-		if ls, ok := s.(*BlockLabelSpec); ok {
-			if maxIdx < ls.Index {
-				maxIdx = ls.Index
-			}
-			if names == nil {
-				names = make(map[int]string)
-			}
-			names[ls.Index] = ls.Name
-		}
-		s.visitSameBodyChildren(visit)
-	}
-
-	visit(spec)
-
-	if maxIdx < 0 {
-		return nil // no labels at all
-	}
-
-	ret := make([]string, maxIdx+1)
-	for i := range ret {
-		name := names[i]
-		if name == "" {
-			// Should never happen if the spec is conformant, since we require
-			// consecutive indices starting at zero.
-			name = fmt.Sprintf("missing%02d", i)
-		}
-		ret[i] = name
-	}
-
-	return ret
-}
-
-// DefaultSpec is a spec that wraps two specs, evaluating the primary first
-// and then evaluating the default if the primary returns a null value.
-//
-// The two specifications must have the same implied result type for correct
-// operation. If not, the result is undefined.
-//
-// Any requirements imposed by the "Default" spec apply even if "Primary" does
-// not return null. For example, if the "Default" spec is for a required
-// attribute then that attribute is always required, regardless of the result
-// of the "Primary" spec.
-//
-// The "Default" spec must not describe a nested block, since otherwise the
-// result of ChildBlockTypes would not be decidable without evaluation. If
-// the default spec _does_ describe a nested block then the result is
-// undefined.
-type DefaultSpec struct {
-	Primary Spec
-	Default Spec
-}
-
-func (s *DefaultSpec) visitSameBodyChildren(cb visitFunc) {
-	cb(s.Primary)
-	cb(s.Default)
-}
-
-func (s *DefaultSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
-	val, diags := s.Primary.decode(content, blockLabels, ctx)
-	if val.IsNull() {
-		var moreDiags hcl.Diagnostics
-		val, moreDiags = s.Default.decode(content, blockLabels, ctx)
-		diags = append(diags, moreDiags...)
-	}
-	return val, diags
-}
-
-func (s *DefaultSpec) impliedType() cty.Type {
-	return s.Primary.impliedType()
-}
-
-// attrSpec implementation
-func (s *DefaultSpec) attrSchemata() []hcl.AttributeSchema {
-	// We must pass through the union of both of our nested specs so that
-	// we'll have both values available in the result.
-	var ret []hcl.AttributeSchema
-	if as, ok := s.Primary.(attrSpec); ok {
-		ret = append(ret, as.attrSchemata()...)
-	}
-	if as, ok := s.Default.(attrSpec); ok {
-		ret = append(ret, as.attrSchemata()...)
- } - return ret -} - -// blockSpec implementation -func (s *DefaultSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - // Only the primary spec may describe a block, since otherwise - // our nestedSpec method below can't know which to return. - if bs, ok := s.Primary.(blockSpec); ok { - return bs.blockHeaderSchemata() - } - return nil -} - -// blockSpec implementation -func (s *DefaultSpec) nestedSpec() Spec { - if bs, ok := s.Primary.(blockSpec); ok { - return bs.nestedSpec() - } - return nil -} - -func (s *DefaultSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We can't tell from here which of the two specs will ultimately be used - // in our result, so we'll just assume the first. This is usually the right - // choice because the default is often a literal spec that doesn't have a - // reasonable source range to return anyway. - return s.Primary.sourceRange(content, blockLabels) -} - -// TransformExprSpec is a spec that wraps another and then evaluates a given -// hcl.Expression on the result. -// -// The implied type of this spec is determined by evaluating the expression -// with an unknown value of the nested spec's implied type, which may cause -// the result to be imprecise. This spec should not be used in situations where -// precise result type information is needed. -type TransformExprSpec struct { - Wrapped Spec - Expr hcl.Expression - TransformCtx *hcl.EvalContext - VarName string -} - -func (s *TransformExprSpec) visitSameBodyChildren(cb visitFunc) { - cb(s.Wrapped) -} - -func (s *TransformExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) - if diags.HasErrors() { - // We won't try to run our function in this case, because it'll probably - // generate confusing additional errors that will distract from the - // root cause. - return cty.UnknownVal(s.impliedType()), diags - } - - chiCtx := s.TransformCtx.NewChild() - chiCtx.Variables = map[string]cty.Value{ - s.VarName: wrappedVal, - } - resultVal, resultDiags := s.Expr.Value(chiCtx) - diags = append(diags, resultDiags...) - return resultVal, diags -} - -func (s *TransformExprSpec) impliedType() cty.Type { - wrappedTy := s.Wrapped.impliedType() - chiCtx := s.TransformCtx.NewChild() - chiCtx.Variables = map[string]cty.Value{ - s.VarName: cty.UnknownVal(wrappedTy), - } - resultVal, _ := s.Expr.Value(chiCtx) - return resultVal.Type() -} - -func (s *TransformExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We'll just pass through our wrapped range here, even though that's - // not super-accurate, because there's nothing better to return. - return s.Wrapped.sourceRange(content, blockLabels) -} - -// TransformFuncSpec is a spec that wraps another and then evaluates a given -// cty function with the result. The given function must expect exactly one -// argument, where the result of the wrapped spec will be passed. -// -// The implied type of this spec is determined by type-checking the function -// with an unknown value of the nested spec's implied type, which may cause -// the result to be imprecise. This spec should not be used in situations where -// precise result type information is needed. -// -// If the given function produces an error when run, this spec will produce -// a non-user-actionable diagnostic message. 
It's the caller's responsibility
-// to ensure that the given function cannot fail for any non-error result
-// of the wrapped spec.
-type TransformFuncSpec struct {
- Wrapped Spec
- Func function.Function
-}
-
-func (s *TransformFuncSpec) visitSameBodyChildren(cb visitFunc) {
- cb(s.Wrapped)
-}
-
-func (s *TransformFuncSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx)
- if diags.HasErrors() {
- // We won't try to run our function in this case, because it'll probably
- // generate confusing additional errors that will distract from the
- // root cause.
- return cty.UnknownVal(s.impliedType()), diags
- }
-
- resultVal, err := s.Func.Call([]cty.Value{wrappedVal})
- if err != nil {
- // This is not a good example of a diagnostic because it is reporting
- // a programming error in the calling application, rather than something
- // an end-user could act on.
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Transform function failed",
- Detail: fmt.Sprintf("Decoder transform returned an error: %s", err),
- Subject: s.sourceRange(content, blockLabels).Ptr(),
- })
- return cty.UnknownVal(s.impliedType()), diags
- }
-
- return resultVal, diags
-}
-
-func (s *TransformFuncSpec) impliedType() cty.Type {
- wrappedTy := s.Wrapped.impliedType()
- resultTy, err := s.Func.ReturnType([]cty.Type{wrappedTy})
- if err != nil {
- // Should never happen with a correctly-configured spec
- return cty.DynamicPseudoType
- }
-
- return resultTy
-}
-
-func (s *TransformFuncSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
- // We'll just pass through our wrapped range here, even though that's
- // not super-accurate, because there's nothing better to return.
- return s.Wrapped.sourceRange(content, blockLabels)
-}
-
-// ValidateSpec is a spec that allows for extended
-// developer-defined validation. The validation function receives the
-// result of the wrapped spec.
-//
-// The Subject field of the returned Diagnostic is optional. If not
-// specified, it is automatically populated with the range covered by
-// the wrapped spec.
-//
-type ValidateSpec struct {
- Wrapped Spec
- Func func(value cty.Value) hcl.Diagnostics
-}
-
-func (s *ValidateSpec) visitSameBodyChildren(cb visitFunc) {
- cb(s.Wrapped)
-}
-
-func (s *ValidateSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx)
- if diags.HasErrors() {
- // We won't try to run our function in this case, because it'll probably
- // generate confusing additional errors that will distract from the
- // root cause.
- return cty.UnknownVal(s.impliedType()), diags
- }
-
- validateDiags := s.Func(wrappedVal)
- // Auto-populate the Subject fields if they weren't set.
- for i := range validateDiags {
- if validateDiags[i].Subject == nil {
- validateDiags[i].Subject = s.sourceRange(content, blockLabels).Ptr()
- }
- }
-
- diags = append(diags, validateDiags...)
- return wrappedVal, diags
-}
-
-func (s *ValidateSpec) impliedType() cty.Type {
- return s.Wrapped.impliedType()
-}
-
-func (s *ValidateSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
- return s.Wrapped.sourceRange(content, blockLabels)
-}
-
-// noopSpec is a placeholder spec that does nothing, used in situations where
-// a non-nil placeholder spec is required. It is not exported because there is
-// no reason to use it directly; it is always an implementation detail only.
-type noopSpec struct {
-}
-
-func (s noopSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- return cty.NullVal(cty.DynamicPseudoType), nil
-}
-
-func (s noopSpec) impliedType() cty.Type {
- return cty.DynamicPseudoType
-}
-
-func (s noopSpec) visitSameBodyChildren(cb visitFunc) {
- // nothing to do
-}
-
-func (s noopSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
- // No useful range for a noopSpec, and nobody should be calling this anyway.
- return hcl.Range{
- Filename: "noopSpec",
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go deleted file mode 100644 index f8440eb6..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go +++ /dev/null @@ -1,36 +0,0 @@
-package hcldec
-
-import (
- "github.com/hashicorp/hcl/v2"
-)
-
-// Variables processes the given body with the given spec and returns a
-// list of the variable traversals that would be required to decode
-// the same pairing of body and spec.
-//
-// This can be used to conditionally populate the variables in the EvalContext
-// passed to Decode, for applications where a static scope is insufficient.
-//
-// If the given body is not compliant with the given schema, the result may
-// be incomplete, but that's assumed to be okay because the eventual call
-// to Decode will produce error diagnostics anyway.
-func Variables(body hcl.Body, spec Spec) []hcl.Traversal {
- var vars []hcl.Traversal
- schema := ImpliedSchema(spec)
- content, _, _ := body.PartialContent(schema)
-
- if vs, ok := spec.(specNeedingVariables); ok {
- vars = append(vars, vs.variablesNeeded(content)...)
- }
-
- var visitFn visitFunc
- visitFn = func(s Spec) {
- if vs, ok := s.(specNeedingVariables); ok {
- vars = append(vars, vs.variablesNeeded(content)...)
- }
- s.visitSameBodyChildren(visitFn)
- }
- spec.visitSameBodyChildren(visitFn)
-
- return vars
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go deleted file mode 100644 index 1dc2eccd..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go +++ /dev/null @@ -1,135 +0,0 @@
-// Package hclparse has the main API entry point for parsing both HCL native
-// syntax and HCL JSON.
-//
-// The main HCL package also includes SimpleParse and SimpleParseFile which
-// can be a simpler interface for the common case where an application just
-// needs to parse a single file. The gohcl package simplifies that further
-// in its SimpleDecode function, which combines hcl.SimpleParse with decoding
-// into Go struct values.
-//
-// Package hclparse, then, is useful for applications that require finer
-// control over parsing or which need to load many separate files and keep
-// track of them for possible error reporting or other analysis.
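A minimal sketch of typical use of the parser defined below; the file name and contents are illustrative only:

package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/hcl/v2/hclparse"
)

func main() {
	p := hclparse.NewParser()

	// Parse a buffer as native syntax; the parser caches the file under
	// the given name, so a later call with the same name returns the
	// cached result.
	f, diags := p.ParseHCL([]byte(`name = "example"`), "example.hcl")
	if diags.HasErrors() {
		fmt.Fprintln(os.Stderr, diags.Error())
		return
	}
	_ = f

	// The retained sources can later feed diagnostic printing.
	for name := range p.Sources() {
		fmt.Println("parsed:", name)
	}
}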
-package hclparse - -import ( - "fmt" - "io/ioutil" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/hcl/v2/json" -) - -// NOTE: This is the public interface for parsing. The actual parsers are -// in other packages alongside this one, with this package just wrapping them -// to provide a unified interface for the caller across all supported formats. - -// Parser is the main interface for parsing configuration files. As well as -// parsing files, a parser also retains a registry of all of the files it -// has parsed so that multiple attempts to parse the same file will return -// the same object and so the collected files can be used when printing -// diagnostics. -// -// Any diagnostics for parsing a file are only returned once on the first -// call to parse that file. Callers are expected to collect up diagnostics -// and present them together, so returning diagnostics for the same file -// multiple times would create a confusing result. -type Parser struct { - files map[string]*hcl.File -} - -// NewParser creates a new parser, ready to parse configuration files. -func NewParser() *Parser { - return &Parser{ - files: map[string]*hcl.File{}, - } -} - -// ParseHCL parses the given buffer (which is assumed to have been loaded from -// the given filename) as a native-syntax configuration file and returns the -// hcl.File object representing it. -func (p *Parser) ParseHCL(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - file, diags := hclsyntax.ParseConfig(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1}) - p.files[filename] = file - return file, diags -} - -// ParseHCLFile reads the given filename and parses it as a native-syntax HCL -// configuration file. An error diagnostic is returned if the given file -// cannot be read. -func (p *Parser) ParseHCLFile(filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - src, err := ioutil.ReadFile(filename) - if err != nil { - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Failed to read file", - Detail: fmt.Sprintf("The configuration file %q could not be read.", filename), - }, - } - } - - return p.ParseHCL(src, filename) -} - -// ParseJSON parses the given JSON buffer (which is assumed to have been loaded -// from the given filename) and returns the hcl.File object representing it. -func (p *Parser) ParseJSON(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - file, diags := json.Parse(src, filename) - p.files[filename] = file - return file, diags -} - -// ParseJSONFile reads the given filename and parses it as JSON, similarly to -// ParseJSON. An error diagnostic is returned if the given file cannot be read. -func (p *Parser) ParseJSONFile(filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - file, diags := json.ParseFile(filename) - p.files[filename] = file - return file, diags -} - -// AddFile allows a caller to record in a parser a file that was parsed some -// other way, thus allowing it to be included in the registry of sources. -func (p *Parser) AddFile(filename string, file *hcl.File) { - p.files[filename] = file -} - -// Sources returns a map from filenames to the raw source code that was -// read from them. 
This is intended to be used, for example, to print -// diagnostics with contextual information. -// -// The arrays underlying the returned slices should not be modified. -func (p *Parser) Sources() map[string][]byte { - ret := make(map[string][]byte) - for fn, f := range p.files { - ret[fn] = f.Bytes - } - return ret -} - -// Files returns a map from filenames to the File objects produced from them. -// This is intended to be used, for example, to print diagnostics with -// contextual information. -// -// The returned map and all of the objects it refers to directly or indirectly -// must not be modified. -func (p *Parser) Files() map[string]*hcl.File { - return p.files -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast.go deleted file mode 100644 index 09041652..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast.go +++ /dev/null @@ -1,121 +0,0 @@ -package hclwrite - -import ( - "bytes" - "io" -) - -type File struct { - inTree - - srcBytes []byte - body *node -} - -// NewEmptyFile constructs a new file with no content, ready to be mutated -// by other calls that append to its body. -func NewEmptyFile() *File { - f := &File{ - inTree: newInTree(), - } - body := newBody() - f.body = f.children.Append(body) - return f -} - -// Body returns the root body of the file, which contains the top-level -// attributes and blocks. -func (f *File) Body() *Body { - return f.body.content.(*Body) -} - -// WriteTo writes the tokens underlying the receiving file to the given writer. -// -// The tokens first have a simple formatting pass applied that adjusts only -// the spaces between them. -func (f *File) WriteTo(wr io.Writer) (int64, error) { - tokens := f.inTree.children.BuildTokens(nil) - format(tokens) - return tokens.WriteTo(wr) -} - -// Bytes returns a buffer containing the source code resulting from the -// tokens underlying the receiving file. If any updates have been made via -// the AST API, these will be reflected in the result. 
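Bytes (defined just below) and WriteTo are the usual endpoints once a file has been built in memory; a minimal sketch, with illustrative attribute names and values:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	f := hclwrite.NewEmptyFile()
	f.Body().SetAttributeValue("name", cty.StringVal("example")) // name = "example"
	f.Body().SetAttributeValue("count", cty.NumberIntVal(3))     // count = 3
	fmt.Printf("%s", f.Bytes()) // formatted HCL source
}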
-func (f *File) Bytes() []byte {
- buf := &bytes.Buffer{}
- f.WriteTo(buf)
- return buf.Bytes()
-}
-
-type comments struct {
- leafNode
-
- parent *node
- tokens Tokens
-}
-
-func newComments(tokens Tokens) *comments {
- return &comments{
- tokens: tokens,
- }
-}
-
-func (c *comments) BuildTokens(to Tokens) Tokens {
- return c.tokens.BuildTokens(to)
-}
-
-type identifier struct {
- leafNode
-
- parent *node
- token *Token
-}
-
-func newIdentifier(token *Token) *identifier {
- return &identifier{
- token: token,
- }
-}
-
-func (i *identifier) BuildTokens(to Tokens) Tokens {
- return append(to, i.token)
-}
-
-func (i *identifier) hasName(name string) bool {
- return name == string(i.token.Bytes)
-}
-
-type number struct {
- leafNode
-
- parent *node
- token *Token
-}
-
-func newNumber(token *Token) *number {
- return &number{
- token: token,
- }
-}
-
-func (n *number) BuildTokens(to Tokens) Tokens {
- return append(to, n.token)
-}
-
-type quoted struct {
- leafNode
-
- parent *node
- tokens Tokens
-}
-
-func newQuoted(tokens Tokens) *quoted {
- return &quoted{
- tokens: tokens,
- }
-}
-
-func (q *quoted) BuildTokens(to Tokens) Tokens {
- return q.tokens.BuildTokens(to)
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_attribute.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_attribute.go deleted file mode 100644 index 609419ff..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_attribute.go +++ /dev/null @@ -1,48 +0,0 @@
-package hclwrite
-
-import (
- "github.com/hashicorp/hcl/v2/hclsyntax"
-)
-
-type Attribute struct {
- inTree
-
- leadComments *node
- name *node
- expr *node
- lineComments *node
-}
-
-func newAttribute() *Attribute {
- return &Attribute{
- inTree: newInTree(),
- }
-}
-
-func (a *Attribute) init(name string, expr *Expression) {
- expr.assertUnattached()
-
- nameTok := newIdentToken(name)
- nameObj := newIdentifier(nameTok)
- a.leadComments = a.children.Append(newComments(nil))
- a.name = a.children.Append(nameObj)
- a.children.AppendUnstructuredTokens(Tokens{
- {
- Type: hclsyntax.TokenEqual,
- Bytes: []byte{'='},
- },
- })
- a.expr = a.children.Append(expr)
- a.expr.list = a.children
- a.lineComments = a.children.Append(newComments(nil))
- a.children.AppendUnstructuredTokens(Tokens{
- {
- Type: hclsyntax.TokenNewline,
- Bytes: []byte{'\n'},
- },
- })
-}
-
-func (a *Attribute) Expr() *Expression {
- return a.expr.content.(*Expression)
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go deleted file mode 100644 index edabb26a..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go +++ /dev/null @@ -1,177 +0,0 @@
-package hclwrite
-
-import (
- "github.com/hashicorp/hcl/v2/hclsyntax"
- "github.com/zclconf/go-cty/cty"
-)
-
-type Block struct {
- inTree
-
- leadComments *node
- typeName *node
- labels *node
- open *node
- body *node
- close *node
-}
-
-func newBlock() *Block {
- return &Block{
- inTree: newInTree(),
- }
-}
-
-// NewBlock constructs a new, empty block with the given type name and labels.
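A sketch of how NewBlock (defined just below) pairs with Body.AppendBlock; the block type name, labels, and attribute are illustrative:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	f := hclwrite.NewEmptyFile()
	block := hclwrite.NewBlock("resource", []string{"aws_instance", "web"})
	block.Body().SetAttributeValue("ami", cty.StringVal("ami-123456"))
	f.Body().AppendBlock(block)
	fmt.Printf("%s", f.Bytes())
	// resource "aws_instance" "web" {
	//   ami = "ami-123456"
	// }
}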
-func NewBlock(typeName string, labels []string) *Block {
- block := newBlock()
- block.init(typeName, labels)
- return block
-}
-
-func (b *Block) init(typeName string, labels []string) {
- nameTok := newIdentToken(typeName)
- nameObj := newIdentifier(nameTok)
- b.leadComments = b.children.Append(newComments(nil))
- b.typeName = b.children.Append(nameObj)
- labelsObj := newBlockLabels(labels)
- b.labels = b.children.Append(labelsObj)
- b.open = b.children.AppendUnstructuredTokens(Tokens{
- {
- Type: hclsyntax.TokenOBrace,
- Bytes: []byte{'{'},
- },
- {
- Type: hclsyntax.TokenNewline,
- Bytes: []byte{'\n'},
- },
- })
- body := newBody() // initially totally empty; caller can append to it subsequently
- b.body = b.children.Append(body)
- b.close = b.children.AppendUnstructuredTokens(Tokens{
- {
- Type: hclsyntax.TokenCBrace,
- Bytes: []byte{'}'},
- },
- {
- Type: hclsyntax.TokenNewline,
- Bytes: []byte{'\n'},
- },
- })
-}
-
-// Body returns the body that represents the content of the receiving block.
-//
-// Appending to or otherwise modifying this body will make changes to the
-// tokens that are generated between the block's open and close braces.
-func (b *Block) Body() *Body {
- return b.body.content.(*Body)
-}
-
-// Type returns the type name of the block.
-func (b *Block) Type() string {
- typeNameObj := b.typeName.content.(*identifier)
- return string(typeNameObj.token.Bytes)
-}
-
-// SetType updates the type name of the block to a given name.
-func (b *Block) SetType(typeName string) {
- nameTok := newIdentToken(typeName)
- nameObj := newIdentifier(nameTok)
- b.typeName.ReplaceWith(nameObj)
-}
-
-// Labels returns the labels of the block.
-func (b *Block) Labels() []string {
- return b.labelsObj().Current()
-}
-
-// SetLabels updates the labels of the block to the given labels.
-// Since we cannot assume that old and new labels are equal in length,
-// remove old labels and insert new ones before TokenOBrace.
-func (b *Block) SetLabels(labels []string) {
- b.labelsObj().Replace(labels)
-}
-
-// labelsObj returns the internal node content representation of the block
-// labels. This is not part of the public API because we're intentionally
-// exposing only a limited API to get/set labels on the block itself in a
-// manner similar to the main hcl.Block type, but our block accessors all
-// use this to get the underlying node content to work with.
-func (b *Block) labelsObj() *blockLabels {
- return b.labels.content.(*blockLabels)
-}
-
-type blockLabels struct {
- inTree
-
- items nodeSet
-}
-
-func newBlockLabels(labels []string) *blockLabels {
- ret := &blockLabels{
- inTree: newInTree(),
- items: newNodeSet(),
- }
-
- ret.Replace(labels)
- return ret
-}
-
-func (bl *blockLabels) Replace(newLabels []string) {
- bl.inTree.children.Clear()
- bl.items.Clear()
-
- for _, label := range newLabels {
- labelToks := TokensForValue(cty.StringVal(label))
- // Force a new label to use the quoted form, which is the idiomatic
- // form. The unquoted form is supported in HCL 2 only for compatibility
- // with historical use in HCL 1.
- labelObj := newQuoted(labelToks)
- labelNode := bl.children.Append(labelObj)
- bl.items.Add(labelNode)
- }
-}
-
-func (bl *blockLabels) Current() []string {
- labelNames := make([]string, 0, len(bl.items))
- list := bl.items.List()
-
- for _, label := range list {
- switch labelObj := label.content.(type) {
- case *identifier:
- if labelObj.token.Type == hclsyntax.TokenIdent {
- labelString := string(labelObj.token.Bytes)
- labelNames = append(labelNames, labelString)
- }
-
- case *quoted:
- tokens := labelObj.tokens
- if len(tokens) == 3 &&
- tokens[0].Type == hclsyntax.TokenOQuote &&
- tokens[1].Type == hclsyntax.TokenQuotedLit &&
- tokens[2].Type == hclsyntax.TokenCQuote {
- // Note that TokenQuotedLit may contain escape sequences.
- labelString, diags := hclsyntax.ParseStringLiteralToken(tokens[1].asHCLSyntax())
-
- // If parsing the string literal returns error diagnostics
- // then we can just assume the label doesn't match, because it's invalid in some way.
- if !diags.HasErrors() {
- labelNames = append(labelNames, labelString)
- }
- } else if len(tokens) == 2 &&
- tokens[0].Type == hclsyntax.TokenOQuote &&
- tokens[1].Type == hclsyntax.TokenCQuote {
- // An open quote followed immediately by a closing quote is a
- // valid but unusual blank string label.
- labelNames = append(labelNames, "")
- }
-
- default:
- // If none of the previous cases matched (which should be impossible)
- // then we can just ignore it, because it's invalid too.
- }
- }
-
- return labelNames
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_body.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_body.go deleted file mode 100644 index 119f53e6..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_body.go +++ /dev/null @@ -1,239 +0,0 @@
-package hclwrite
-
-import (
- "reflect"
-
- "github.com/hashicorp/hcl/v2"
- "github.com/hashicorp/hcl/v2/hclsyntax"
- "github.com/zclconf/go-cty/cty"
-)
-
-type Body struct {
- inTree
-
- items nodeSet
-}
-
-func newBody() *Body {
- return &Body{
- inTree: newInTree(),
- items: newNodeSet(),
- }
-}
-
-func (b *Body) appendItem(c nodeContent) *node {
- nn := b.children.Append(c)
- b.items.Add(nn)
- return nn
-}
-
-func (b *Body) appendItemNode(nn *node) *node {
- nn.assertUnattached()
- b.children.AppendNode(nn)
- b.items.Add(nn)
- return nn
-}
-
-// Clear removes all of the items from the body, making it empty.
-func (b *Body) Clear() {
- b.children.Clear()
-}
-
-func (b *Body) AppendUnstructuredTokens(ts Tokens) {
- b.inTree.children.Append(ts)
-}
-
-// Attributes returns a new map of all of the attributes in the body, with
-// the attribute names as the keys.
-func (b *Body) Attributes() map[string]*Attribute {
- ret := make(map[string]*Attribute)
- for n := range b.items {
- if attr, isAttr := n.content.(*Attribute); isAttr {
- nameObj := attr.name.content.(*identifier)
- name := string(nameObj.token.Bytes)
- ret[name] = attr
- }
- }
- return ret
-}
-
-// Blocks returns a new slice of all the blocks in the body.
-func (b *Body) Blocks() []*Block {
- ret := make([]*Block, 0, len(b.items))
- for _, n := range b.items.List() {
- if block, isBlock := n.content.(*Block); isBlock {
- ret = append(ret, block)
- }
- }
- return ret
-}
-
-// GetAttribute returns the attribute from the body that has the given name,
-// or returns nil if there is currently no matching attribute.
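GetAttribute (defined just below) returning nil for an absent attribute enables a simple set-if-absent pattern; a sketch with illustrative names:

package main

import (
	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	body := hclwrite.NewEmptyFile().Body()
	// Only add a default if the attribute is not already present.
	if body.GetAttribute("region") == nil {
		body.SetAttributeValue("region", cty.StringVal("us-east-1"))
	}
}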
-func (b *Body) GetAttribute(name string) *Attribute {
- for n := range b.items {
- if attr, isAttr := n.content.(*Attribute); isAttr {
- nameObj := attr.name.content.(*identifier)
- if nameObj.hasName(name) {
- // We've found it!
- return attr
- }
- }
- }
-
- return nil
-}
-
-// getAttributeNode is like GetAttribute but it returns the node containing
-// the selected attribute (if one is found) rather than the attribute itself.
-func (b *Body) getAttributeNode(name string) *node {
- for n := range b.items {
- if attr, isAttr := n.content.(*Attribute); isAttr {
- nameObj := attr.name.content.(*identifier)
- if nameObj.hasName(name) {
- // We've found it!
- return n
- }
- }
- }
-
- return nil
-}
-
-// FirstMatchingBlock returns the first matching block from the body that has the
-// given name and labels or returns nil if there is currently no matching
-// block.
-func (b *Body) FirstMatchingBlock(typeName string, labels []string) *Block {
- for _, block := range b.Blocks() {
- if typeName == block.Type() {
- labelNames := block.Labels()
- if len(labels) == 0 && len(labelNames) == 0 {
- return block
- }
- if reflect.DeepEqual(labels, labelNames) {
- return block
- }
- }
- }
-
- return nil
-}
-
-// RemoveBlock removes the given block from the body, if it's in that body.
-// If it isn't present, this is a no-op.
-//
-// Returns true if it removed something, or false otherwise.
-func (b *Body) RemoveBlock(block *Block) bool {
- for n := range b.items {
- if n.content == block {
- n.Detach()
- b.items.Remove(n)
- return true
- }
- }
- return false
-}
-
-// SetAttributeRaw either replaces the expression of an existing attribute
-// of the given name or adds a new attribute definition to the end of the block,
-// using the given tokens verbatim as the expression.
-//
-// The same caveats apply to this function as for NewExpressionRaw on which
-// it is based. If possible, prefer to use SetAttributeValue or
-// SetAttributeTraversal.
-func (b *Body) SetAttributeRaw(name string, tokens Tokens) *Attribute {
- attr := b.GetAttribute(name)
- expr := NewExpressionRaw(tokens)
- if attr != nil {
- attr.expr = attr.expr.ReplaceWith(expr)
- } else {
- // Assign (not redeclare) so the newly-created attribute is returned.
- attr = newAttribute()
- attr.init(name, expr)
- b.appendItem(attr)
- }
- return attr
-}
-
-// SetAttributeValue either replaces the expression of an existing attribute
-// of the given name or adds a new attribute definition to the end of the block.
-//
-// The value is given as a cty.Value, and must therefore be a literal. To set
-// a variable reference or other traversal, use SetAttributeTraversal.
-//
-// The return value is the attribute that was either modified in-place or
-// created.
-func (b *Body) SetAttributeValue(name string, val cty.Value) *Attribute {
- attr := b.GetAttribute(name)
- expr := NewExpressionLiteral(val)
- if attr != nil {
- attr.expr = attr.expr.ReplaceWith(expr)
- } else {
- // Assign (not redeclare) so the newly-created attribute is returned.
- attr = newAttribute()
- attr.init(name, expr)
- b.appendItem(attr)
- }
- return attr
-}
-
-// SetAttributeTraversal either replaces the expression of an existing attribute
-// of the given name or adds a new attribute definition to the end of the body.
-//
-// The new expression is given as a hcl.Traversal, which must be an absolute
-// traversal. To set a literal value, use SetAttributeValue.
-//
-// The return value is the attribute that was either modified in-place or
-// created.
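A sketch of SetAttributeTraversal (defined just below) producing a variable reference rather than a literal; the names are illustrative:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

func main() {
	f := hclwrite.NewEmptyFile()
	f.Body().SetAttributeTraversal("ami", hcl.Traversal{
		hcl.TraverseRoot{Name: "var"},
		hcl.TraverseAttr{Name: "base_ami"},
	})
	fmt.Printf("%s", f.Bytes()) // ami = var.base_ami
}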
-func (b *Body) SetAttributeTraversal(name string, traversal hcl.Traversal) *Attribute {
- attr := b.GetAttribute(name)
- expr := NewExpressionAbsTraversal(traversal)
- if attr != nil {
- attr.expr = attr.expr.ReplaceWith(expr)
- } else {
- // Assign (not redeclare) so the newly-created attribute is returned.
- attr = newAttribute()
- attr.init(name, expr)
- b.appendItem(attr)
- }
- return attr
-}
-
-// RemoveAttribute removes the attribute with the given name from the body.
-//
-// The return value is the attribute that was removed, or nil if there was
-// no such attribute (in which case the call was a no-op).
-func (b *Body) RemoveAttribute(name string) *Attribute {
- node := b.getAttributeNode(name)
- if node == nil {
- return nil
- }
- node.Detach()
- b.items.Remove(node)
- return node.content.(*Attribute)
-}
-
-// AppendBlock appends an existing block (which must not be already attached
-// to a body) to the end of the receiving body.
-func (b *Body) AppendBlock(block *Block) *Block {
- b.appendItem(block)
- return block
-}
-
-// AppendNewBlock appends a new nested block to the end of the receiving body
-// with the given type name and labels.
-func (b *Body) AppendNewBlock(typeName string, labels []string) *Block {
- block := newBlock()
- block.init(typeName, labels)
- b.appendItem(block)
- return block
-}
-
-// AppendNewline appends a newline token to the end of the receiving body,
-// which generally serves as a separator between different sets of body
-// contents.
-func (b *Body) AppendNewline() {
- b.AppendUnstructuredTokens(Tokens{
- {
- Type: hclsyntax.TokenNewline,
- Bytes: []byte{'\n'},
- },
- })
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_expression.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_expression.go deleted file mode 100644 index 073c3087..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_expression.go +++ /dev/null @@ -1,224 +0,0 @@
-package hclwrite
-
-import (
- "fmt"
-
- "github.com/hashicorp/hcl/v2"
- "github.com/hashicorp/hcl/v2/hclsyntax"
- "github.com/zclconf/go-cty/cty"
-)
-
-type Expression struct {
- inTree
-
- absTraversals nodeSet
-}
-
-func newExpression() *Expression {
- return &Expression{
- inTree: newInTree(),
- absTraversals: newNodeSet(),
- }
-}
-
-// NewExpressionRaw constructs an expression containing the given raw tokens.
-//
-// There is no automatic validation that the given tokens produce a valid
-// expression. Callers of this function must take care not to produce invalid
-// expression tokens. Where possible, use the higher-level functions
-// NewExpressionLiteral or NewExpressionAbsTraversal instead.
-//
-// Because NewExpressionRaw does not interpret the given tokens in any way,
-// an expression created by NewExpressionRaw will produce an empty result
-// for calls to its method Variables, even if the given token sequence
-// contains a subslice that would normally be interpreted as a traversal under
-// parsing.
-func NewExpressionRaw(tokens Tokens) *Expression {
- expr := newExpression()
- // We copy the tokens here in order to make sure that later mutations
- // by the caller don't inadvertently cause our expression to become
- // invalid.
- copyTokens := make(Tokens, len(tokens))
- copy(copyTokens, tokens)
- expr.children.AppendUnstructuredTokens(copyTokens)
- return expr
-}
-
-// NewExpressionLiteral constructs an expression that represents the given
-// literal value.
-//
-// Since an unknown value cannot be represented in source code, this function
-// will panic if the given value is unknown or contains a nested unknown value.
-// Use val.IsWhollyKnown before calling to be sure.
-//
-// HCL native syntax does not directly represent lists, maps, and sets, and
-// instead relies on the automatic conversions to those collection types from
-// either list or tuple constructor syntax. Therefore converting collection
-// values to source code and re-reading them will lose type information, and
-// the reader must provide a suitable type at decode time to recover the
-// original value.
-func NewExpressionLiteral(val cty.Value) *Expression {
- toks := TokensForValue(val)
- expr := newExpression()
- expr.children.AppendUnstructuredTokens(toks)
- return expr
-}
-
-// NewExpressionAbsTraversal constructs an expression that represents the
-// given traversal, which must be absolute or this function will panic.
-func NewExpressionAbsTraversal(traversal hcl.Traversal) *Expression {
- if traversal.IsRelative() {
- panic("can't construct expression from relative traversal")
- }
-
- physT := newTraversal()
- rootName := traversal.RootName()
- steps := traversal[1:]
-
- {
- tn := newTraverseName()
- tn.name = tn.children.Append(newIdentifier(&Token{
- Type: hclsyntax.TokenIdent,
- Bytes: []byte(rootName),
- }))
- physT.steps.Add(physT.children.Append(tn))
- }
-
- for _, step := range steps {
- switch ts := step.(type) {
- case hcl.TraverseAttr:
- tn := newTraverseName()
- tn.children.AppendUnstructuredTokens(Tokens{
- {
- Type: hclsyntax.TokenDot,
- Bytes: []byte{'.'},
- },
- })
- tn.name = tn.children.Append(newIdentifier(&Token{
- Type: hclsyntax.TokenIdent,
- Bytes: []byte(ts.Name),
- }))
- physT.steps.Add(physT.children.Append(tn))
- case hcl.TraverseIndex:
- ti := newTraverseIndex()
- ti.children.AppendUnstructuredTokens(Tokens{
- {
- Type: hclsyntax.TokenOBrack,
- Bytes: []byte{'['},
- },
- })
- indexExpr := NewExpressionLiteral(ts.Key)
- ti.key = ti.children.Append(indexExpr)
- ti.children.AppendUnstructuredTokens(Tokens{
- {
- Type: hclsyntax.TokenCBrack,
- Bytes: []byte{']'},
- },
- })
- physT.steps.Add(physT.children.Append(ti))
- }
- }
-
- expr := newExpression()
- expr.absTraversals.Add(expr.children.Append(physT))
- return expr
-}
-
-// Variables returns the absolute traversals that exist within the receiving
-// expression.
-func (e *Expression) Variables() []*Traversal {
- nodes := e.absTraversals.List()
- ret := make([]*Traversal, len(nodes))
- for i, node := range nodes {
- ret[i] = node.content.(*Traversal)
- }
- return ret
-}
-
-// RenameVariablePrefix examines each of the absolute traversals in the
-// receiving expression to see if they have the given sequence of names as
-// a prefix. If so, they are updated in place to have the given
-// replacement names instead of that prefix.
-//
-// This can be used to implement symbol renaming. The calling application can
-// visit all relevant expressions in its input and apply the same renaming
-// to implement a global symbol rename.
-//
-// The search and replacement traversals must be the same length, or this
-// method will panic. Only attribute access operations can be matched and
-// replaced. Index steps never match the prefix.
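RenameVariablePrefix (defined just below) operates on one expression at a time, so a body-wide rename walks all attributes; a sketch assuming an illustrative var-to-local rename:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

func main() {
	f := hclwrite.NewEmptyFile()
	f.Body().SetAttributeTraversal("ami", hcl.Traversal{
		hcl.TraverseRoot{Name: "var"},
		hcl.TraverseAttr{Name: "base_ami"},
	})

	// Rewrite references like var.foo into local.foo in every attribute.
	for _, attr := range f.Body().Attributes() {
		attr.Expr().RenameVariablePrefix([]string{"var"}, []string{"local"})
	}
	fmt.Printf("%s", f.Bytes()) // ami = local.base_ami
}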
-func (e *Expression) RenameVariablePrefix(search, replacement []string) { - if len(search) != len(replacement) { - panic(fmt.Sprintf("search and replacement length mismatch (%d and %d)", len(search), len(replacement))) - } -Traversals: - for node := range e.absTraversals { - traversal := node.content.(*Traversal) - if len(traversal.steps) < len(search) { - // If it's shorter then it can't have our prefix - continue - } - - stepNodes := traversal.steps.List() - for i, name := range search { - step, isName := stepNodes[i].content.(*TraverseName) - if !isName { - continue Traversals // only name nodes can match - } - foundNameBytes := step.name.content.(*identifier).token.Bytes - if len(foundNameBytes) != len(name) { - continue Traversals - } - if string(foundNameBytes) != name { - continue Traversals - } - } - - // If we get here then the prefix matched, so now we'll swap in - // the replacement strings. - for i, name := range replacement { - step := stepNodes[i].content.(*TraverseName) - token := step.name.content.(*identifier).token - token.Bytes = []byte(name) - } - } -} - -// Traversal represents a sequence of variable, attribute, and/or index -// operations. -type Traversal struct { - inTree - - steps nodeSet -} - -func newTraversal() *Traversal { - return &Traversal{ - inTree: newInTree(), - steps: newNodeSet(), - } -} - -type TraverseName struct { - inTree - - name *node -} - -func newTraverseName() *TraverseName { - return &TraverseName{ - inTree: newInTree(), - } -} - -type TraverseIndex struct { - inTree - - key *node -} - -func newTraverseIndex() *TraverseIndex { - return &TraverseIndex{ - inTree: newInTree(), - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/doc.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/doc.go deleted file mode 100644 index 56d5b775..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package hclwrite deals with the problem of generating HCL configuration -// and of making specific surgical changes to existing HCL configurations. -// -// It operates at a different level of abstraction than the main HCL parser -// and AST, since details such as the placement of comments and newlines -// are preserved when unchanged. -// -// The hclwrite API follows a similar principle to XML/HTML DOM, allowing nodes -// to be read out, created and inserted, etc. Nodes represent syntax constructs -// rather than semantic concepts. -package hclwrite diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go deleted file mode 100644 index dca247ee..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go +++ /dev/null @@ -1,463 +0,0 @@ -package hclwrite - -import ( - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// format rewrites tokens within the given sequence, in-place, to adjust the -// whitespace around their content to achieve canonical formatting. -func format(tokens Tokens) { - // Formatting is a multi-pass process. More details on the passes below, - // but this is the overview: - // - adjust the leading space on each line to create appropriate - // indentation - // - adjust spaces between tokens in a single cell using a set of rules - // - adjust the leading space in the "assign" and "comment" cells on each - // line to vertically align with neighboring lines. 
- // All of these steps operate in-place on the given tokens, so a caller
- // may collect a flat sequence of all of the tokens underlying an AST
- // and pass it here and we will then indirectly modify the AST itself.
- // Formatting must change only whitespace. Specifically, that means
- // changing the SpacesBefore attribute on a token while leaving the
- // other token attributes unchanged.
-
- lines := linesForFormat(tokens)
- formatIndent(lines)
- formatSpaces(lines)
- formatCells(lines)
-}
-
-func formatIndent(lines []formatLine) {
- // Our methodology for indents is to take the input one line at a time
- // and count the bracketing delimiters on each line. If a line has a net
- // increase in open brackets, we increase the indent level by one and
- // remember how many new openers we had. If the line has a net _decrease_,
- // we'll compare it to the most recent number of openers and pop one
- // level of indent each time we pass an indent level remembered
- // earlier.
- // The "indent stack" used here allows us to recognize degenerate
- // input where brackets are not symmetrical within lines and avoid
- // pushing things too far left or right, creating confusion.
-
- // We'll start our indent stack at a reasonable capacity to minimize the
- // chance of us needing to grow it; 10 here means 10 levels of indent,
- // which should be more than enough for reasonable HCL uses.
- indents := make([]int, 0, 10)
-
- for i := range lines {
- line := &lines[i]
- if len(line.lead) == 0 {
- continue
- }
-
- if line.lead[0].Type == hclsyntax.TokenNewline {
- // Never place spaces before a newline
- line.lead[0].SpacesBefore = 0
- continue
- }
-
- netBrackets := 0
- for _, token := range line.lead {
- netBrackets += tokenBracketChange(token)
- if token.Type == hclsyntax.TokenOHeredoc {
- break
- }
- }
-
- for _, token := range line.assign {
- netBrackets += tokenBracketChange(token)
- }
-
- switch {
- case netBrackets > 0:
- line.lead[0].SpacesBefore = 2 * len(indents)
- indents = append(indents, netBrackets)
- case netBrackets < 0:
- closed := -netBrackets
- for closed > 0 && len(indents) > 0 {
- switch {
-
- case closed > indents[len(indents)-1]:
- closed -= indents[len(indents)-1]
- indents = indents[:len(indents)-1]
-
- case closed < indents[len(indents)-1]:
- indents[len(indents)-1] -= closed
- closed = 0
-
- default:
- indents = indents[:len(indents)-1]
- closed = 0
- }
- }
- line.lead[0].SpacesBefore = 2 * len(indents)
- default:
- line.lead[0].SpacesBefore = 2 * len(indents)
- }
- }
-}
-
-func formatSpaces(lines []formatLine) {
- // placeholder token used when we don't have a token but we don't want
- // to pass a real "nil" and complicate things with nil pointer checks
- nilToken := &Token{
- Type: hclsyntax.TokenNil,
- Bytes: []byte{},
- SpacesBefore: 0,
- }
-
- for _, line := range lines {
- for i, token := range line.lead {
- var before, after *Token
- if i > 0 {
- before = line.lead[i-1]
- } else {
- before = nilToken
- }
- if i < (len(line.lead) - 1) {
- after = line.lead[i+1]
- } else {
- continue
- }
- if spaceAfterToken(token, before, after) {
- after.SpacesBefore = 1
- } else {
- after.SpacesBefore = 0
- }
- }
- for i, token := range line.assign {
- if i == 0 {
- // first token in "assign" always has one space before to
- // separate the equals sign from what it's assigning.
- token.SpacesBefore = 1 - } - - var before, after *Token - if i > 0 { - before = line.assign[i-1] - } else { - before = nilToken - } - if i < (len(line.assign) - 1) { - after = line.assign[i+1] - } else { - continue - } - if spaceAfterToken(token, before, after) { - after.SpacesBefore = 1 - } else { - after.SpacesBefore = 0 - } - } - - } -} - -func formatCells(lines []formatLine) { - chainStart := -1 - maxColumns := 0 - - // We'll deal with the "assign" cell first, since moving that will - // also impact the "comment" cell. - closeAssignChain := func(i int) { - for _, chainLine := range lines[chainStart:i] { - columns := chainLine.lead.Columns() - spaces := (maxColumns - columns) + 1 - chainLine.assign[0].SpacesBefore = spaces - } - chainStart = -1 - maxColumns = 0 - } - for i, line := range lines { - if line.assign == nil { - if chainStart != -1 { - closeAssignChain(i) - } - } else { - if chainStart == -1 { - chainStart = i - } - columns := line.lead.Columns() - if columns > maxColumns { - maxColumns = columns - } - } - } - if chainStart != -1 { - closeAssignChain(len(lines)) - } - - // Now we'll deal with the comments - closeCommentChain := func(i int) { - for _, chainLine := range lines[chainStart:i] { - columns := chainLine.lead.Columns() + chainLine.assign.Columns() - spaces := (maxColumns - columns) + 1 - chainLine.comment[0].SpacesBefore = spaces - } - chainStart = -1 - maxColumns = 0 - } - for i, line := range lines { - if line.comment == nil { - if chainStart != -1 { - closeCommentChain(i) - } - } else { - if chainStart == -1 { - chainStart = i - } - columns := line.lead.Columns() + line.assign.Columns() - if columns > maxColumns { - maxColumns = columns - } - } - } - if chainStart != -1 { - closeCommentChain(len(lines)) - } -} - -// spaceAfterToken decides whether a particular subject token should have a -// space after it when surrounded by the given before and after tokens. -// "before" can be TokenNil, if the subject token is at the start of a sequence. -func spaceAfterToken(subject, before, after *Token) bool { - switch { - - case after.Type == hclsyntax.TokenNewline || after.Type == hclsyntax.TokenNil: - // Never add spaces before a newline - return false - - case subject.Type == hclsyntax.TokenIdent && after.Type == hclsyntax.TokenOParen: - // Don't split a function name from open paren in a call - return false - - case subject.Type == hclsyntax.TokenDot || after.Type == hclsyntax.TokenDot: - // Don't use spaces around attribute access dots - return false - - case after.Type == hclsyntax.TokenComma || after.Type == hclsyntax.TokenEllipsis: - // No space right before a comma or ... in an argument list - return false - - case subject.Type == hclsyntax.TokenComma: - // Always a space after a comma - return true - - case subject.Type == hclsyntax.TokenQuotedLit || subject.Type == hclsyntax.TokenStringLit || subject.Type == hclsyntax.TokenOQuote || subject.Type == hclsyntax.TokenOHeredoc || after.Type == hclsyntax.TokenQuotedLit || after.Type == hclsyntax.TokenStringLit || after.Type == hclsyntax.TokenCQuote || after.Type == hclsyntax.TokenCHeredoc: - // No extra spaces within templates - return false - - case hclsyntax.Keyword([]byte{'i', 'n'}).TokenMatches(subject.asHCLSyntax()) && before.Type == hclsyntax.TokenIdent: - // This is a special case for inside for expressions where a user - // might want to use a literal tuple constructor: - // [for x in [foo]: x] - // ... 
in that case, we would normally produce in[foo] thinking that - // in is a reference, but we'll recognize it as a keyword here instead - // to make the result less confusing. - return true - - case after.Type == hclsyntax.TokenOBrack && (subject.Type == hclsyntax.TokenIdent || subject.Type == hclsyntax.TokenNumberLit || tokenBracketChange(subject) < 0): - return false - - case subject.Type == hclsyntax.TokenBang: - // No space after a bang - return false - - case subject.Type == hclsyntax.TokenMinus: - // Since a minus can either be subtraction or negation, and the latter - // should _not_ have a space after it, we need to use some heuristics - // to decide which case this is. - // We guess that we have a negation if the token before doesn't look - // like it could be the end of an expression. - - switch before.Type { - - case hclsyntax.TokenNil: - // Minus at the start of input must be a negation - return false - - case hclsyntax.TokenOParen, hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenEqual, hclsyntax.TokenColon, hclsyntax.TokenComma, hclsyntax.TokenQuestion: - // Minus immediately after an opening bracket or separator must be a negation. - return false - - case hclsyntax.TokenPlus, hclsyntax.TokenStar, hclsyntax.TokenSlash, hclsyntax.TokenPercent, hclsyntax.TokenMinus: - // Minus immediately after another arithmetic operator must be negation. - return false - - case hclsyntax.TokenEqualOp, hclsyntax.TokenNotEqual, hclsyntax.TokenGreaterThan, hclsyntax.TokenGreaterThanEq, hclsyntax.TokenLessThan, hclsyntax.TokenLessThanEq: - // Minus immediately after another comparison operator must be negation. - return false - - case hclsyntax.TokenAnd, hclsyntax.TokenOr, hclsyntax.TokenBang: - // Minus immediately after logical operator doesn't make sense but probably intended as negation. - return false - - default: - return true - } - - case subject.Type == hclsyntax.TokenOBrace || after.Type == hclsyntax.TokenCBrace: - // Unlike other bracket types, braces have spaces on both sides of them, - // both in single-line nested blocks foo { bar = baz } and in object - // constructor expressions foo = { bar = baz }. - if subject.Type == hclsyntax.TokenOBrace && after.Type == hclsyntax.TokenCBrace { - // An open brace followed by a close brace is an exception, however. - // e.g. foo {} rather than foo { } - return false - } - return true - - // In the unlikely event that an interpolation expression is just - // a single object constructor, we'll put a space between the ${ and - // the following { to make this more obvious, and then the same - // thing for the two braces at the end. 
- case (subject.Type == hclsyntax.TokenTemplateInterp || subject.Type == hclsyntax.TokenTemplateControl) && after.Type == hclsyntax.TokenOBrace: - return true - case subject.Type == hclsyntax.TokenCBrace && after.Type == hclsyntax.TokenTemplateSeqEnd: - return true - - // Don't add spaces between interpolated items - case subject.Type == hclsyntax.TokenTemplateSeqEnd && (after.Type == hclsyntax.TokenTemplateInterp || after.Type == hclsyntax.TokenTemplateControl): - return false - - case tokenBracketChange(subject) > 0: - // No spaces after open brackets - return false - - case tokenBracketChange(after) < 0: - // No spaces before close brackets - return false - - default: - // Most tokens are space-separated - return true - - } -} - -func linesForFormat(tokens Tokens) []formatLine { - if len(tokens) == 0 { - return make([]formatLine, 0) - } - - // first we'll count our lines, so we can allocate the array for them in - // a single block. (We want to minimize memory pressure in this codepath, - // so it can be run somewhat-frequently by editor integrations.) - lineCount := 1 // if there are zero newlines then there is one line - for _, tok := range tokens { - if tokenIsNewline(tok) { - lineCount++ - } - } - - // To start, we'll just put everything in the "lead" cell on each line, - // and then do another pass over the lines afterwards to adjust. - lines := make([]formatLine, lineCount) - li := 0 - lineStart := 0 - for i, tok := range tokens { - if tok.Type == hclsyntax.TokenEOF { - // The EOF token doesn't belong to any line, and terminates the - // token sequence. - lines[li].lead = tokens[lineStart:i] - break - } - - if tokenIsNewline(tok) { - lines[li].lead = tokens[lineStart : i+1] - lineStart = i + 1 - li++ - } - } - - // If a set of tokens doesn't end in TokenEOF (e.g. because it's a - // fragment of tokens from the middle of a file) then we might fall - // out here with a line still pending. - if lineStart < len(tokens) { - lines[li].lead = tokens[lineStart:] - if lines[li].lead[len(lines[li].lead)-1].Type == hclsyntax.TokenEOF { - lines[li].lead = lines[li].lead[:len(lines[li].lead)-1] - } - } - - // Now we'll pick off any trailing comments and attribute assignments - // to shuffle off into the "comment" and "assign" cells. - for i := range lines { - line := &lines[i] - - if len(line.lead) == 0 { - // if the line is empty then there's nothing for us to do - // (this should happen only for the final line, because all other - // lines would have a newline token of some kind) - continue - } - - if len(line.lead) > 1 && line.lead[len(line.lead)-1].Type == hclsyntax.TokenComment { - line.comment = line.lead[len(line.lead)-1:] - line.lead = line.lead[:len(line.lead)-1] - } - - for i, tok := range line.lead { - if i > 0 && tok.Type == hclsyntax.TokenEqual { - // We only move the tokens into "assign" if the RHS seems to - // be a whole expression, which we determine by counting - // brackets. If there's a net positive number of brackets - // then that suggests we're introducing a multi-line expression. 
- netBrackets := 0
- for _, token := range line.lead[i:] {
- netBrackets += tokenBracketChange(token)
- }
-
- if netBrackets == 0 {
- line.assign = line.lead[i:]
- line.lead = line.lead[:i]
- }
- break
- }
- }
- }
-
- return lines
-}
-
-func tokenIsNewline(tok *Token) bool {
- if tok.Type == hclsyntax.TokenNewline {
- return true
- } else if tok.Type == hclsyntax.TokenComment {
- // Single line tokens (# and //) consume their terminating newline,
- // so we need to treat them as newline tokens as well.
- if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
- return true
- }
- }
- return false
-}
-
-func tokenBracketChange(tok *Token) int {
- switch tok.Type {
- case hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenOParen, hclsyntax.TokenTemplateControl, hclsyntax.TokenTemplateInterp:
- return 1
- case hclsyntax.TokenCBrace, hclsyntax.TokenCBrack, hclsyntax.TokenCParen, hclsyntax.TokenTemplateSeqEnd:
- return -1
- default:
- return 0
- }
-}
-
-// formatLine represents a single line of source code for formatting purposes,
-// splitting its tokens into up to three "cells":
-//
-// lead: always present, representing everything up to one of the others
-// assign: if line contains an attribute assignment, represents the tokens
-// starting at (and including) the equals symbol
-// comment: if line contains any non-comment tokens and ends with a
-// single-line comment token, represents the comment.
-//
-// When formatting, the leading spaces of the first tokens in each of these
-// cells are adjusted to vertically align their occurrences on consecutive
-// rows.
-type formatLine struct {
- lead Tokens
- assign Tokens
- comment Tokens
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go deleted file mode 100644 index 6f6a2e63..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go +++ /dev/null @@ -1,396 +0,0 @@
-package hclwrite
-
-import (
- "fmt"
- "unicode"
- "unicode/utf8"
-
- "github.com/hashicorp/hcl/v2"
- "github.com/hashicorp/hcl/v2/hclsyntax"
- "github.com/zclconf/go-cty/cty"
-)
-
-// TokensForValue returns a sequence of tokens that represents the given
-// constant value.
-//
-// This function only supports types that are used by HCL. In particular, it
-// does not support capsule types and will panic if given one.
-//
-// It is not possible to express an unknown value in source code, so this
-// function will panic if the given value is unknown or contains any unknown
-// values. A caller can call the value's IsWhollyKnown method to verify that
-// no unknown values are present before calling TokensForValue.
-func TokensForValue(val cty.Value) Tokens {
- toks := appendTokensForValue(val, nil)
- format(toks) // fiddle with the SpacesBefore field to get canonical spacing
- return toks
-}
-
-// TokensForTraversal returns a sequence of tokens that represents the given
-// traversal.
-//
-// If the traversal is absolute then the result is a self-contained, valid
-// reference expression. If the traversal is relative then the returned tokens
-// could be appended to some other expression tokens to traverse into the
-// represented expression.
-func TokensForTraversal(traversal hcl.Traversal) Tokens {
- toks := appendTokensForTraversal(traversal, nil)
- format(toks) // fiddle with the SpacesBefore field to get canonical spacing
- return toks
-}
-
-// TokensForIdentifier returns a sequence of tokens representing just the
-// given identifier.
-//
-// In practice this function can only ever generate exactly one token, because
-// an identifier is always a leaf token in the syntax tree.
-//
-// This is similar to calling TokensForTraversal with a single-step absolute
-// traversal, but avoids the need to construct a separate traversal object
-// for this simple common case. If you need to generate a multi-step traversal,
-// use TokensForTraversal instead.
-func TokensForIdentifier(name string) Tokens {
- return Tokens{
- newIdentToken(name),
- }
-}
-
-// TokensForTuple returns a sequence of tokens that represents a tuple
-// constructor, with element expressions populated from the given list
-// of tokens.
-//
-// TokensForTuple includes the given elements verbatim into the element
-// positions in the resulting tuple expression, without any validation to
-// ensure that they represent valid expressions. Use TokensForValue or
-// TokensForTraversal to generate valid leaf expression values, or use
-// TokensForTuple, TokensForObject, and TokensForFunctionCall to
-// generate other nested compound expressions.
-func TokensForTuple(elems []Tokens) Tokens {
- var toks Tokens
- toks = append(toks, &Token{
- Type: hclsyntax.TokenOBrack,
- Bytes: []byte{'['},
- })
- for index, elem := range elems {
- if index > 0 {
- toks = append(toks, &Token{
- Type: hclsyntax.TokenComma,
- Bytes: []byte{','},
- })
- }
- toks = append(toks, elem...)
- }
-
- toks = append(toks, &Token{
- Type: hclsyntax.TokenCBrack,
- Bytes: []byte{']'},
- })
-
- format(toks) // fiddle with the SpacesBefore field to get canonical spacing
- return toks
-}
-
-// TokensForObject returns a sequence of tokens that represents an object
-// constructor, with attribute name/value pairs populated from the given
-// list of attribute token objects.
-//
-// TokensForObject includes the given tokens verbatim into the name and
-// value positions in the resulting object expression, without any validation
-// to ensure that they represent valid expressions. Use TokensForValue or
-// TokensForTraversal to generate valid leaf expression values, or use
-// TokensForTuple, TokensForObject, and TokensForFunctionCall to
-// generate other nested compound expressions.
-//
-// Note that HCL requires placing a traversal expression in parentheses if
-// you intend to use it as an attribute name expression, because otherwise
-// the parser will interpret it as a literal attribute name. TokensForObject
-// does not handle that situation automatically, so a caller must add the
-// necessary `TokenOParen` and `TokenCParen` manually if needed.
-func TokensForObject(attrs []ObjectAttrTokens) Tokens {
- var toks Tokens
- toks = append(toks, &Token{
- Type: hclsyntax.TokenOBrace,
- Bytes: []byte{'{'},
- })
- if len(attrs) > 0 {
- toks = append(toks, &Token{
- Type: hclsyntax.TokenNewline,
- Bytes: []byte{'\n'},
- })
- }
- for _, attr := range attrs {
- toks = append(toks, attr.Name...)
- toks = append(toks, &Token{
- Type: hclsyntax.TokenEqual,
- Bytes: []byte{'='},
- })
- toks = append(toks, attr.Value...)
- toks = append(toks, &Token{
- Type: hclsyntax.TokenNewline,
- Bytes: []byte{'\n'},
- })
- }
- toks = append(toks, &Token{
- Type: hclsyntax.TokenCBrace,
- Bytes: []byte{'}'},
- })
-
- format(toks) // fiddle with the SpacesBefore field to get canonical spacing
- return toks
-}
-
-// TokensForFunctionCall returns a sequence of tokens that represents a call
-// to the function with the given name, using the argument tokens to
-// populate the argument expressions.
-// -// TokensForFunctionCall includes the given argument tokens verbatim into the -// positions in the resulting call expression, without any validation -// to ensure that they represent valid expressions. Use TokensForValue or -// TokensForTraversal to generate valid leaf expression values, or use -// TokensForTuple, TokensForObject, and TokensForFunctionCall to -// generate other nested compound expressions. -// -// This function doesn't include an explicit way to generate the expansion -// symbol "..." on the final argument. Currently, generating that requires -// manually appending a TokenEllipsis with the bytes "..." to the tokens for -// the final argument. -func TokensForFunctionCall(funcName string, args ...Tokens) Tokens { - var toks Tokens - toks = append(toks, TokensForIdentifier(funcName)...) - toks = append(toks, &Token{ - Type: hclsyntax.TokenOParen, - Bytes: []byte{'('}, - }) - for index, arg := range args { - if index > 0 { - toks = append(toks, &Token{ - Type: hclsyntax.TokenComma, - Bytes: []byte{','}, - }) - } - toks = append(toks, arg...) - } - toks = append(toks, &Token{ - Type: hclsyntax.TokenCParen, - Bytes: []byte{')'}, - }) - - format(toks) // fiddle with the SpacesBefore field to get canonical spacing - return toks -} - -func appendTokensForValue(val cty.Value, toks Tokens) Tokens { - switch { - - case !val.IsKnown(): - panic("cannot produce tokens for unknown value") - - case val.IsNull(): - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(`null`), - }) - - case val.Type() == cty.Bool: - var src []byte - if val.True() { - src = []byte(`true`) - } else { - src = []byte(`false`) - } - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: src, - }) - - case val.Type() == cty.Number: - bf := val.AsBigFloat() - srcStr := bf.Text('f', -1) - toks = append(toks, &Token{ - Type: hclsyntax.TokenNumberLit, - Bytes: []byte(srcStr), - }) - - case val.Type() == cty.String: - // TODO: If it's a multi-line string ending in a newline, format - // it as a HEREDOC instead. 
- src := escapeQuotedStringLit(val.AsString()) - toks = append(toks, &Token{ - Type: hclsyntax.TokenOQuote, - Bytes: []byte{'"'}, - }) - if len(src) > 0 { - toks = append(toks, &Token{ - Type: hclsyntax.TokenQuotedLit, - Bytes: src, - }) - } - toks = append(toks, &Token{ - Type: hclsyntax.TokenCQuote, - Bytes: []byte{'"'}, - }) - - case val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType(): - toks = append(toks, &Token{ - Type: hclsyntax.TokenOBrack, - Bytes: []byte{'['}, - }) - - i := 0 - for it := val.ElementIterator(); it.Next(); { - if i > 0 { - toks = append(toks, &Token{ - Type: hclsyntax.TokenComma, - Bytes: []byte{','}, - }) - } - _, eVal := it.Element() - toks = appendTokensForValue(eVal, toks) - i++ - } - - toks = append(toks, &Token{ - Type: hclsyntax.TokenCBrack, - Bytes: []byte{']'}, - }) - - case val.Type().IsMapType() || val.Type().IsObjectType(): - toks = append(toks, &Token{ - Type: hclsyntax.TokenOBrace, - Bytes: []byte{'{'}, - }) - if val.LengthInt() > 0 { - toks = append(toks, &Token{ - Type: hclsyntax.TokenNewline, - Bytes: []byte{'\n'}, - }) - } - - i := 0 - for it := val.ElementIterator(); it.Next(); { - eKey, eVal := it.Element() - if hclsyntax.ValidIdentifier(eKey.AsString()) { - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(eKey.AsString()), - }) - } else { - toks = appendTokensForValue(eKey, toks) - } - toks = append(toks, &Token{ - Type: hclsyntax.TokenEqual, - Bytes: []byte{'='}, - }) - toks = appendTokensForValue(eVal, toks) - toks = append(toks, &Token{ - Type: hclsyntax.TokenNewline, - Bytes: []byte{'\n'}, - }) - i++ - } - - toks = append(toks, &Token{ - Type: hclsyntax.TokenCBrace, - Bytes: []byte{'}'}, - }) - - default: - panic(fmt.Sprintf("cannot produce tokens for %#v", val)) - } - - return toks -} - -func appendTokensForTraversal(traversal hcl.Traversal, toks Tokens) Tokens { - for _, step := range traversal { - toks = appendTokensForTraversalStep(step, toks) - } - return toks -} - -func appendTokensForTraversalStep(step hcl.Traverser, toks Tokens) Tokens { - switch ts := step.(type) { - case hcl.TraverseRoot: - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(ts.Name), - }) - case hcl.TraverseAttr: - toks = append( - toks, - &Token{ - Type: hclsyntax.TokenDot, - Bytes: []byte{'.'}, - }, - &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(ts.Name), - }, - ) - case hcl.TraverseIndex: - toks = append(toks, &Token{ - Type: hclsyntax.TokenOBrack, - Bytes: []byte{'['}, - }) - toks = appendTokensForValue(ts.Key, toks) - toks = append(toks, &Token{ - Type: hclsyntax.TokenCBrack, - Bytes: []byte{']'}, - }) - default: - panic(fmt.Sprintf("unsupported traversal step type %T", step)) - } - - return toks -} - -func escapeQuotedStringLit(s string) []byte { - if len(s) == 0 { - return nil - } - buf := make([]byte, 0, len(s)) - for i, r := range s { - switch r { - case '\n': - buf = append(buf, '\\', 'n') - case '\r': - buf = append(buf, '\\', 'r') - case '\t': - buf = append(buf, '\\', 't') - case '"': - buf = append(buf, '\\', '"') - case '\\': - buf = append(buf, '\\', '\\') - case '$', '%': - buf = appendRune(buf, r) - remain := s[i+1:] - if len(remain) > 0 && remain[0] == '{' { - // Double up our template introducer symbol to escape it. - buf = appendRune(buf, r) - } - default: - if !unicode.IsPrint(r) { - var fmted string - if r < 65536 { - fmted = fmt.Sprintf("\\u%04x", r) - } else { - fmted = fmt.Sprintf("\\U%08x", r) - } - buf = append(buf, fmted...) 
-			} else {
-				buf = appendRune(buf, r)
-			}
-		}
-	}
-	return buf
-}
-
-func appendRune(b []byte, r rune) []byte {
-	l := utf8.RuneLen(r)
-	for i := 0; i < l; i++ {
-		b = append(b, 0) // make room at the end of our buffer
-	}
-	ch := b[len(b)-l:]
-	utf8.EncodeRune(ch, r)
-	return b
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go
deleted file mode 100644
index cedf6862..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package hclwrite
-
-import (
-	"github.com/hashicorp/hcl/v2/hclsyntax"
-)
-
-type nativeNodeSorter struct {
-	Nodes []hclsyntax.Node
-}
-
-func (s nativeNodeSorter) Len() int {
-	return len(s.Nodes)
-}
-
-func (s nativeNodeSorter) Less(i, j int) bool {
-	rangeI := s.Nodes[i].Range()
-	rangeJ := s.Nodes[j].Range()
-	return rangeI.Start.Byte < rangeJ.Start.Byte
-}
-
-func (s nativeNodeSorter) Swap(i, j int) {
-	s.Nodes[i], s.Nodes[j] = s.Nodes[j], s.Nodes[i]
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go
deleted file mode 100644
index d3a5b72c..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go
+++ /dev/null
@@ -1,296 +0,0 @@
-package hclwrite
-
-import (
-	"fmt"
-
-	"github.com/google/go-cmp/cmp"
-)
-
-// node represents a node in the AST.
-type node struct {
-	content nodeContent
-
-	list          *nodes
-	before, after *node
-}
-
-func newNode(c nodeContent) *node {
-	return &node{
-		content: c,
-	}
-}
-
-func (n *node) Equal(other *node) bool {
-	return cmp.Equal(n.content, other.content)
-}
-
-func (n *node) BuildTokens(to Tokens) Tokens {
-	return n.content.BuildTokens(to)
-}
-
-// Detach removes the receiver from the list it currently belongs to. If the
-// node is not currently in a list, this is a no-op.
-func (n *node) Detach() {
-	if n.list == nil {
-		return
-	}
-	if n.before != nil {
-		n.before.after = n.after
-	}
-	if n.after != nil {
-		n.after.before = n.before
-	}
-	if n.list.first == n {
-		n.list.first = n.after
-	}
-	if n.list.last == n {
-		n.list.last = n.before
-	}
-	n.list = nil
-	n.before = nil
-	n.after = nil
-}
-
-// ReplaceWith removes the receiver from the list it currently belongs to and
-// inserts a new node with the given content in its place. If the node is not
-// currently in a list, this function will panic.
-//
-// The return value is the newly-constructed node, containing the given content.
-// After this function returns, the receiver is no longer attached to a list.
-func (n *node) ReplaceWith(c nodeContent) *node {
-	if n.list == nil {
-		panic("can't replace node that is not in a list")
-	}
-
-	before := n.before
-	after := n.after
-	list := n.list
-	n.before, n.after, n.list = nil, nil, nil
-
-	nn := newNode(c)
-	nn.before = before
-	nn.after = after
-	nn.list = list
-	if before != nil {
-		before.after = nn
-	}
-	if after != nil {
-		after.before = nn
-	}
-	return nn
-}
-
-func (n *node) assertUnattached() {
-	if n.list != nil {
-		panic(fmt.Sprintf("attempt to attach already-attached node %#v", n))
-	}
-}
-
-// nodeContent is the interface type implemented by all AST content types.
-type nodeContent interface {
-	walkChildNodes(w internalWalkFunc)
-	BuildTokens(to Tokens) Tokens
-}
-
-// nodes is a list of nodes.
-type nodes struct { - first, last *node -} - -func (ns *nodes) BuildTokens(to Tokens) Tokens { - for n := ns.first; n != nil; n = n.after { - to = n.BuildTokens(to) - } - return to -} - -func (ns *nodes) Clear() { - ns.first = nil - ns.last = nil -} - -func (ns *nodes) Append(c nodeContent) *node { - n := &node{ - content: c, - } - ns.AppendNode(n) - n.list = ns - return n -} - -func (ns *nodes) AppendNode(n *node) { - if ns.last != nil { - n.before = ns.last - ns.last.after = n - } - n.list = ns - ns.last = n - if ns.first == nil { - ns.first = n - } -} - -// Insert inserts a nodeContent at a given position. -// This is just a wrapper for InsertNode. See InsertNode for details. -func (ns *nodes) Insert(pos *node, c nodeContent) *node { - n := &node{ - content: c, - } - ns.InsertNode(pos, n) - n.list = ns - return n -} - -// InsertNode inserts a node at a given position. -// The first argument is a node reference before which to insert. -// To insert it to an empty list, set position to nil. -func (ns *nodes) InsertNode(pos *node, n *node) { - if pos == nil { - // inserts n to empty list. - ns.first = n - ns.last = n - } else { - // inserts n before pos. - pos.before.after = n - n.before = pos.before - pos.before = n - n.after = pos - } - - n.list = ns -} - -func (ns *nodes) AppendUnstructuredTokens(tokens Tokens) *node { - if len(tokens) == 0 { - return nil - } - n := newNode(tokens) - ns.AppendNode(n) - n.list = ns - return n -} - -// FindNodeWithContent searches the nodes for a node whose content equals -// the given content. If it finds one then it returns it. Otherwise it returns -// nil. -func (ns *nodes) FindNodeWithContent(content nodeContent) *node { - for n := ns.first; n != nil; n = n.after { - if n.content == content { - return n - } - } - return nil -} - -// nodeSet is an unordered set of nodes. It is used to describe a set of nodes -// that all belong to the same list that have some role or characteristic -// in common. -type nodeSet map[*node]struct{} - -func newNodeSet() nodeSet { - return make(nodeSet) -} - -func (ns nodeSet) Has(n *node) bool { - if ns == nil { - return false - } - _, exists := ns[n] - return exists -} - -func (ns nodeSet) Add(n *node) { - ns[n] = struct{}{} -} - -func (ns nodeSet) Remove(n *node) { - delete(ns, n) -} - -func (ns nodeSet) Clear() { - for n := range ns { - delete(ns, n) - } -} - -func (ns nodeSet) List() []*node { - if len(ns) == 0 { - return nil - } - - ret := make([]*node, 0, len(ns)) - - // Determine which list we are working with. We assume here that all of - // the nodes belong to the same list, since that is part of the contract - // for nodeSet. - var list *nodes - for n := range ns { - list = n.list - break - } - - // We recover the order by iterating over the whole list. This is not - // the most efficient way to do it, but our node lists should always be - // small so not worth making things more complex. - for n := list.first; n != nil; n = n.after { - if ns.Has(n) { - ret = append(ret, n) - } - } - return ret -} - -// FindNodeWithContent searches the nodes for a node whose content equals -// the given content. If it finds one then it returns it. Otherwise it returns -// nil. 
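These unexported list primitives are what the package's exported editing API is built on. A minimal sketch of the same machinery seen from the outside, using NewFile (declared in public.go further down in this patch) and exported Body methods from files outside this hunk:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	f := hclwrite.NewFile()

	// AppendNewBlock and SetAttributeValue ultimately call
	// nodes.Append/AppendNode on the linked list shown above.
	block := f.Body().AppendNewBlock("service", []string{"web"})
	block.Body().SetAttributeValue("replicas", cty.NumberIntVal(2))

	fmt.Print(string(f.Bytes()))
	// service "web" {
	//   replicas = 2
	// }
}
```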
-func (ns nodeSet) FindNodeWithContent(content nodeContent) *node { - for n := range ns { - if n.content == content { - return n - } - } - return nil -} - -type internalWalkFunc func(*node) - -// inTree can be embedded into a content struct that has child nodes to get -// a standard implementation of the NodeContent interface and a record of -// a potential parent node. -type inTree struct { - parent *node - children *nodes -} - -func newInTree() inTree { - return inTree{ - children: &nodes{}, - } -} - -func (it *inTree) assertUnattached() { - if it.parent != nil { - panic(fmt.Sprintf("node is already attached to %T", it.parent.content)) - } -} - -func (it *inTree) walkChildNodes(w internalWalkFunc) { - for n := it.children.first; n != nil; n = n.after { - w(n) - } -} - -func (it *inTree) BuildTokens(to Tokens) Tokens { - for n := it.children.first; n != nil; n = n.after { - to = n.BuildTokens(to) - } - return to -} - -// leafNode can be embedded into a content struct to give it a do-nothing -// implementation of walkChildNodes -type leafNode struct { -} - -func (n *leafNode) walkChildNodes(w internalWalkFunc) { -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go deleted file mode 100644 index 3df51447..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go +++ /dev/null @@ -1,638 +0,0 @@ -package hclwrite - -import ( - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -// Our "parser" here is actually not doing any parsing of its own. Instead, -// it leans on the native parser in hclsyntax, and then uses the source ranges -// from the AST to partition the raw token sequence to match the raw tokens -// up to AST nodes. -// -// This strategy feels somewhat counter-intuitive, since most of the work the -// parser does is thrown away here, but this strategy is chosen because the -// normal parsing work done by hclsyntax is considered to be the "main case", -// while modifying and re-printing source is more of an edge case, used only -// in ancillary tools, and so it's good to keep all the main parsing logic -// with the main case but keep all of the extra complexity of token wrangling -// out of the main parser, which is already rather complex just serving the -// use-cases it already serves. -// -// If the parsing step produces any errors, the returned File is nil because -// we can't reliably extract tokens from the partial AST produced by an -// erroneous parse. -func parse(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) { - file, diags := hclsyntax.ParseConfig(src, filename, start) - if diags.HasErrors() { - return nil, diags - } - - // To do our work here, we use the "native" tokens (those from hclsyntax) - // to match against source ranges in the AST, but ultimately produce - // slices from our sequence of "writer" tokens, which contain only - // *relative* position information that is more appropriate for - // transformation/writing use-cases. - nativeTokens, diags := hclsyntax.LexConfig(src, filename, start) - if diags.HasErrors() { - // should never happen, since we would've caught these diags in - // the first call above. 
-		return nil, diags
-	}
-	writerTokens := writerTokens(nativeTokens)
-
-	from := inputTokens{
-		nativeTokens: nativeTokens,
-		writerTokens: writerTokens,
-	}
-
-	before, root, after := parseBody(file.Body.(*hclsyntax.Body), from)
-	ret := &File{
-		inTree: newInTree(),
-
-		srcBytes: src,
-		body:     root,
-	}
-
-	nodes := ret.inTree.children
-	nodes.Append(before.Tokens())
-	nodes.AppendNode(root)
-	nodes.Append(after.Tokens())
-
-	return ret, diags
-}
-
-type inputTokens struct {
-	nativeTokens hclsyntax.Tokens
-	writerTokens Tokens
-}
-
-func (it inputTokens) Partition(rng hcl.Range) (before, within, after inputTokens) {
-	start, end := partitionTokens(it.nativeTokens, rng)
-	before = it.Slice(0, start)
-	within = it.Slice(start, end)
-	after = it.Slice(end, len(it.nativeTokens))
-	return
-}
-
-func (it inputTokens) PartitionType(ty hclsyntax.TokenType) (before, within, after inputTokens) {
-	for i, t := range it.writerTokens {
-		if t.Type == ty {
-			return it.Slice(0, i), it.Slice(i, i+1), it.Slice(i+1, len(it.nativeTokens))
-		}
-	}
-	panic(fmt.Sprintf("didn't find any token of type %s", ty))
-}
-
-func (it inputTokens) PartitionTypeOk(ty hclsyntax.TokenType) (before, within, after inputTokens, ok bool) {
-	for i, t := range it.writerTokens {
-		if t.Type == ty {
-			return it.Slice(0, i), it.Slice(i, i+1), it.Slice(i+1, len(it.nativeTokens)), true
-		}
-	}
-
-	return inputTokens{}, inputTokens{}, inputTokens{}, false
-}
-
-func (it inputTokens) PartitionTypeSingle(ty hclsyntax.TokenType) (before inputTokens, found *Token, after inputTokens) {
-	before, within, after := it.PartitionType(ty)
-	if within.Len() != 1 {
-		panic("PartitionType found more than one token")
-	}
-	return before, within.Tokens()[0], after
-}
-
-// PartitionIncludingComments is like Partition except the returned "within"
-// range includes any lead and line comments associated with the range.
-func (it inputTokens) PartitionIncludingComments(rng hcl.Range) (before, within, after inputTokens) {
-	start, end := partitionTokens(it.nativeTokens, rng)
-	start = partitionLeadCommentTokens(it.nativeTokens[:start])
-	_, afterNewline := partitionLineEndTokens(it.nativeTokens[end:])
-	end += afterNewline
-
-	before = it.Slice(0, start)
-	within = it.Slice(start, end)
-	after = it.Slice(end, len(it.nativeTokens))
-	return
-
-}
-
-// PartitionBlockItem is similar to PartitionIncludingComments but it returns
-// the comments as separate token sequences so that they can be captured into
-// AST attributes. It makes assumptions that apply only to block items, so
-// should not be used for other constructs.
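The point of all this partitioning is that a parsed file retains every input byte, so edits can be surgical. A sketch of the resulting behavior through the exported API (ParseConfig appears in public.go later in this patch; FirstMatchingBlock and SetAttributeValue live in files outside this hunk): only the replaced expression's tokens are regenerated, so comments and spacing survive.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte("# lead comment\nservice \"web\" {\n  replicas = 2 # line comment\n}\n")

	f, diags := hclwrite.ParseConfig(src, "main.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// Both comments and the original layout are preserved in the output;
	// only the expression tokens for "replicas" are rebuilt.
	block := f.Body().FirstMatchingBlock("service", []string{"web"})
	block.Body().SetAttributeValue("replicas", cty.NumberIntVal(3))

	fmt.Print(string(f.Bytes()))
}
```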
-func (it inputTokens) PartitionBlockItem(rng hcl.Range) (before, leadComments, within, lineComments, newline, after inputTokens) { - before, within, after = it.Partition(rng) - before, leadComments = before.PartitionLeadComments() - lineComments, newline, after = after.PartitionLineEndTokens() - return -} - -func (it inputTokens) PartitionLeadComments() (before, within inputTokens) { - start := partitionLeadCommentTokens(it.nativeTokens) - before = it.Slice(0, start) - within = it.Slice(start, len(it.nativeTokens)) - return -} - -func (it inputTokens) PartitionLineEndTokens() (comments, newline, after inputTokens) { - afterComments, afterNewline := partitionLineEndTokens(it.nativeTokens) - comments = it.Slice(0, afterComments) - newline = it.Slice(afterComments, afterNewline) - after = it.Slice(afterNewline, len(it.nativeTokens)) - return -} - -func (it inputTokens) Slice(start, end int) inputTokens { - // When we slice, we create a new slice with no additional capacity because - // we expect that these slices will be mutated in order to insert - // new code into the AST, and we want to ensure that a new underlying - // array gets allocated in that case, rather than writing into some - // following slice and corrupting it. - return inputTokens{ - nativeTokens: it.nativeTokens[start:end:end], - writerTokens: it.writerTokens[start:end:end], - } -} - -func (it inputTokens) Len() int { - return len(it.nativeTokens) -} - -func (it inputTokens) Tokens() Tokens { - return it.writerTokens -} - -func (it inputTokens) Types() []hclsyntax.TokenType { - ret := make([]hclsyntax.TokenType, len(it.nativeTokens)) - for i, tok := range it.nativeTokens { - ret[i] = tok.Type - } - return ret -} - -// parseBody locates the given body within the given input tokens and returns -// the resulting *Body object as well as the tokens that appeared before and -// after it. -func parseBody(nativeBody *hclsyntax.Body, from inputTokens) (inputTokens, *node, inputTokens) { - before, within, after := from.PartitionIncludingComments(nativeBody.SrcRange) - - // The main AST doesn't retain the original source ordering of the - // body items, so we need to reconstruct that ordering by inspecting - // their source ranges. 
- nativeItems := make([]hclsyntax.Node, 0, len(nativeBody.Attributes)+len(nativeBody.Blocks)) - for _, nativeAttr := range nativeBody.Attributes { - nativeItems = append(nativeItems, nativeAttr) - } - for _, nativeBlock := range nativeBody.Blocks { - nativeItems = append(nativeItems, nativeBlock) - } - sort.Sort(nativeNodeSorter{nativeItems}) - - body := &Body{ - inTree: newInTree(), - items: newNodeSet(), - } - - remain := within - for _, nativeItem := range nativeItems { - beforeItem, item, afterItem := parseBodyItem(nativeItem, remain) - - if beforeItem.Len() > 0 { - body.AppendUnstructuredTokens(beforeItem.Tokens()) - } - body.appendItemNode(item) - - remain = afterItem - } - - if remain.Len() > 0 { - body.AppendUnstructuredTokens(remain.Tokens()) - } - - return before, newNode(body), after -} - -func parseBodyItem(nativeItem hclsyntax.Node, from inputTokens) (inputTokens, *node, inputTokens) { - before, leadComments, within, lineComments, newline, after := from.PartitionBlockItem(nativeItem.Range()) - - var item *node - - switch tItem := nativeItem.(type) { - case *hclsyntax.Attribute: - item = parseAttribute(tItem, within, leadComments, lineComments, newline) - case *hclsyntax.Block: - item = parseBlock(tItem, within, leadComments, lineComments, newline) - default: - // should never happen if caller is behaving - panic("unsupported native item type") - } - - return before, item, after -} - -func parseAttribute(nativeAttr *hclsyntax.Attribute, from, leadComments, lineComments, newline inputTokens) *node { - attr := &Attribute{ - inTree: newInTree(), - } - children := attr.inTree.children - - { - cn := newNode(newComments(leadComments.Tokens())) - attr.leadComments = cn - children.AppendNode(cn) - } - - before, nameTokens, from := from.Partition(nativeAttr.NameRange) - { - children.AppendUnstructuredTokens(before.Tokens()) - if nameTokens.Len() != 1 { - // Should never happen with valid input - panic("attribute name is not exactly one token") - } - token := nameTokens.Tokens()[0] - in := newNode(newIdentifier(token)) - attr.name = in - children.AppendNode(in) - } - - before, equalsTokens, from := from.Partition(nativeAttr.EqualsRange) - children.AppendUnstructuredTokens(before.Tokens()) - children.AppendUnstructuredTokens(equalsTokens.Tokens()) - - before, exprTokens, from := from.Partition(nativeAttr.Expr.Range()) - { - children.AppendUnstructuredTokens(before.Tokens()) - exprNode := parseExpression(nativeAttr.Expr, exprTokens) - attr.expr = exprNode - children.AppendNode(exprNode) - } - - { - cn := newNode(newComments(lineComments.Tokens())) - attr.lineComments = cn - children.AppendNode(cn) - } - - children.AppendUnstructuredTokens(newline.Tokens()) - - // Collect any stragglers, though there shouldn't be any - children.AppendUnstructuredTokens(from.Tokens()) - - return newNode(attr) -} - -func parseBlock(nativeBlock *hclsyntax.Block, from, leadComments, lineComments, newline inputTokens) *node { - block := &Block{ - inTree: newInTree(), - } - children := block.inTree.children - - { - cn := newNode(newComments(leadComments.Tokens())) - block.leadComments = cn - children.AppendNode(cn) - } - - before, typeTokens, from := from.Partition(nativeBlock.TypeRange) - { - children.AppendUnstructuredTokens(before.Tokens()) - if typeTokens.Len() != 1 { - // Should never happen with valid input - panic("block type name is not exactly one token") - } - token := typeTokens.Tokens()[0] - in := newNode(newIdentifier(token)) - block.typeName = in - children.AppendNode(in) - } - - before, labelsNode, 
from := parseBlockLabels(nativeBlock, from) - block.labels = labelsNode - children.AppendNode(labelsNode) - - before, oBrace, from := from.Partition(nativeBlock.OpenBraceRange) - children.AppendUnstructuredTokens(before.Tokens()) - block.open = children.AppendUnstructuredTokens(oBrace.Tokens()) - - // We go a bit out of order here: we go hunting for the closing brace - // so that we have a delimited body, but then we'll deal with the body - // before we actually append the closing brace and any straggling tokens - // that appear after it. - bodyTokens, cBrace, from := from.Partition(nativeBlock.CloseBraceRange) - before, body, after := parseBody(nativeBlock.Body, bodyTokens) - children.AppendUnstructuredTokens(before.Tokens()) - block.body = body - children.AppendNode(body) - children.AppendUnstructuredTokens(after.Tokens()) - - block.close = children.AppendUnstructuredTokens(cBrace.Tokens()) - - // stragglers - children.AppendUnstructuredTokens(from.Tokens()) - if lineComments.Len() > 0 { - // blocks don't actually have line comments, so we'll just treat - // them as extra stragglers - children.AppendUnstructuredTokens(lineComments.Tokens()) - } - children.AppendUnstructuredTokens(newline.Tokens()) - - return newNode(block) -} - -func parseBlockLabels(nativeBlock *hclsyntax.Block, from inputTokens) (inputTokens, *node, inputTokens) { - labelsObj := newBlockLabels(nil) - children := labelsObj.children - - var beforeAll inputTokens - for i, rng := range nativeBlock.LabelRanges { - var before, labelTokens inputTokens - before, labelTokens, from = from.Partition(rng) - if i == 0 { - beforeAll = before - } else { - children.AppendUnstructuredTokens(before.Tokens()) - } - tokens := labelTokens.Tokens() - var ln *node - if len(tokens) == 1 && tokens[0].Type == hclsyntax.TokenIdent { - ln = newNode(newIdentifier(tokens[0])) - } else { - ln = newNode(newQuoted(tokens)) - } - labelsObj.items.Add(ln) - children.AppendNode(ln) - } - - after := from - return beforeAll, newNode(labelsObj), after -} - -func parseExpression(nativeExpr hclsyntax.Expression, from inputTokens) *node { - expr := newExpression() - children := expr.inTree.children - - nativeVars := nativeExpr.Variables() - - for _, nativeTraversal := range nativeVars { - before, traversal, after := parseTraversal(nativeTraversal, from) - children.AppendUnstructuredTokens(before.Tokens()) - children.AppendNode(traversal) - expr.absTraversals.Add(traversal) - from = after - } - // Attach any stragglers that don't belong to a traversal to the expression - // itself. In an expression with no traversals at all, this is just the - // entirety of "from". 
- children.AppendUnstructuredTokens(from.Tokens()) - - return newNode(expr) -} - -func parseTraversal(nativeTraversal hcl.Traversal, from inputTokens) (before inputTokens, n *node, after inputTokens) { - traversal := newTraversal() - children := traversal.inTree.children - before, from, after = from.Partition(nativeTraversal.SourceRange()) - - stepAfter := from - for _, nativeStep := range nativeTraversal { - before, step, after := parseTraversalStep(nativeStep, stepAfter) - children.AppendUnstructuredTokens(before.Tokens()) - children.AppendNode(step) - traversal.steps.Add(step) - stepAfter = after - } - - return before, newNode(traversal), after -} - -func parseTraversalStep(nativeStep hcl.Traverser, from inputTokens) (before inputTokens, n *node, after inputTokens) { - var children *nodes - switch tNativeStep := nativeStep.(type) { - - case hcl.TraverseRoot, hcl.TraverseAttr: - step := newTraverseName() - children = step.inTree.children - before, from, after = from.Partition(nativeStep.SourceRange()) - inBefore, token, inAfter := from.PartitionTypeSingle(hclsyntax.TokenIdent) - name := newIdentifier(token) - children.AppendUnstructuredTokens(inBefore.Tokens()) - step.name = children.Append(name) - children.AppendUnstructuredTokens(inAfter.Tokens()) - return before, newNode(step), after - - case hcl.TraverseIndex: - step := newTraverseIndex() - children = step.inTree.children - before, from, after = from.Partition(nativeStep.SourceRange()) - - if inBefore, dot, from, ok := from.PartitionTypeOk(hclsyntax.TokenDot); ok { - children.AppendUnstructuredTokens(inBefore.Tokens()) - children.AppendUnstructuredTokens(dot.Tokens()) - - valBefore, valToken, valAfter := from.PartitionTypeSingle(hclsyntax.TokenNumberLit) - children.AppendUnstructuredTokens(valBefore.Tokens()) - key := newNumber(valToken) - step.key = children.Append(key) - children.AppendUnstructuredTokens(valAfter.Tokens()) - - return before, newNode(step), after - } - - var inBefore, oBrack, keyTokens, cBrack inputTokens - inBefore, oBrack, from = from.PartitionType(hclsyntax.TokenOBrack) - children.AppendUnstructuredTokens(inBefore.Tokens()) - children.AppendUnstructuredTokens(oBrack.Tokens()) - keyTokens, cBrack, from = from.PartitionType(hclsyntax.TokenCBrack) - - keyVal := tNativeStep.Key - switch keyVal.Type() { - case cty.String: - key := newQuoted(keyTokens.Tokens()) - step.key = children.Append(key) - case cty.Number: - valBefore, valToken, valAfter := keyTokens.PartitionTypeSingle(hclsyntax.TokenNumberLit) - children.AppendUnstructuredTokens(valBefore.Tokens()) - key := newNumber(valToken) - step.key = children.Append(key) - children.AppendUnstructuredTokens(valAfter.Tokens()) - } - - children.AppendUnstructuredTokens(cBrack.Tokens()) - children.AppendUnstructuredTokens(from.Tokens()) - - return before, newNode(step), after - default: - panic(fmt.Sprintf("unsupported traversal step type %T", nativeStep)) - } - -} - -// writerTokens takes a sequence of tokens as produced by the main hclsyntax -// package and transforms it into an equivalent sequence of tokens using -// this package's own token model. -// -// The resulting list contains the same number of tokens and uses the same -// indices as the input, allowing the two sets of tokens to be correlated -// by index. 
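parseTraversal and parseTraversalStep are the reading-side counterparts of TokensForTraversal, which the generate.go doc comment earlier in this patch refers to. A minimal sketch of the generation direction:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Build the traversal aws_instance.web[0] as data, then render it.
	trav := hcl.Traversal{
		hcl.TraverseRoot{Name: "aws_instance"},
		hcl.TraverseAttr{Name: "web"},
		hcl.TraverseIndex{Key: cty.NumberIntVal(0)},
	}
	toks := hclwrite.TokensForTraversal(trav)
	fmt.Println(string(toks.Bytes())) // aws_instance.web[0]
}
```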
-func writerTokens(nativeTokens hclsyntax.Tokens) Tokens { - // Ultimately we want a slice of token _pointers_, but since we can - // predict how much memory we're going to devote to tokens we'll allocate - // it all as a single flat buffer and thus give the GC less work to do. - tokBuf := make([]Token, len(nativeTokens)) - var lastByteOffset int - for i, mainToken := range nativeTokens { - // Create a copy of the bytes so that we can mutate without - // corrupting the original token stream. - bytes := make([]byte, len(mainToken.Bytes)) - copy(bytes, mainToken.Bytes) - - tokBuf[i] = Token{ - Type: mainToken.Type, - Bytes: bytes, - - // We assume here that spaces are always ASCII spaces, since - // that's what the scanner also assumes, and thus the number - // of bytes skipped is also the number of space characters. - SpacesBefore: mainToken.Range.Start.Byte - lastByteOffset, - } - - lastByteOffset = mainToken.Range.End.Byte - } - - // Now make a slice of pointers into the previous slice. - ret := make(Tokens, len(tokBuf)) - for i := range ret { - ret[i] = &tokBuf[i] - } - - return ret -} - -// partitionTokens takes a sequence of tokens and a hcl.Range and returns -// two indices within the token sequence that correspond with the range -// boundaries, such that the slice operator could be used to produce -// three token sequences for before, within, and after respectively: -// -// start, end := partitionTokens(toks, rng) -// before := toks[:start] -// within := toks[start:end] -// after := toks[end:] -// -// This works best when the range is aligned with token boundaries (e.g. -// because it was produced in terms of the scanner's result) but if that isn't -// true then it will make a best effort that may produce strange results at -// the boundaries. -// -// Native hclsyntax tokens are used here, because they contain the necessary -// absolute position information. However, since writerTokens produces a -// correlatable sequence of writer tokens, the resulting indices can be -// used also to index into its result, allowing the partitioning of writer -// tokens to be driven by the partitioning of native tokens. -// -// The tokens are assumed to be in source order and non-overlapping, which -// will be true if the token sequence from the scanner is used directly. -func partitionTokens(toks hclsyntax.Tokens, rng hcl.Range) (start, end int) { - // We use a linear search here because we assume that in most cases our - // target range is close to the beginning of the sequence, and the sequences - // are generally small for most reasonable files anyway. - for i := 0; ; i++ { - if i >= len(toks) { - // No tokens for the given range at all! - return len(toks), len(toks) - } - - if toks[i].Range.Start.Byte >= rng.Start.Byte { - start = i - break - } - } - - for i := start; ; i++ { - if i >= len(toks) { - // The range "hangs off" the end of the token sequence - return start, len(toks) - } - - if toks[i].Range.Start.Byte >= rng.End.Byte { - end = i // end marker is exclusive - break - } - } - - return start, end -} - -// partitionLeadCommentTokens takes a sequence of tokens that is assumed -// to immediately precede a construct that can have lead comment tokens, -// and returns the index into that sequence where the lead comments begin. -// -// Lead comments are defined as whole lines containing only comment tokens -// with no blank lines between. If no such lines are found, the returned -// index will be len(toks). 
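The relative-position model that writerTokens produces is easy to observe from outside the package: parse something with uneven spacing and dump each token's SpacesBefore. A small sketch (error handling elided):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

func main() {
	f, _ := hclwrite.ParseConfig([]byte("a  =  1\n"), "x.hcl", hcl.Pos{Line: 1, Column: 1})
	for _, tok := range f.BuildTokens(nil) {
		// e.g. TokenEqual has SpacesBefore == 2 for this input.
		fmt.Printf("%-16v spaces=%d bytes=%q\n", tok.Type, tok.SpacesBefore, tok.Bytes)
	}
}
```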
-func partitionLeadCommentTokens(toks hclsyntax.Tokens) int {
-	// single-line comments (which is what we're interested in here)
-	// consume their trailing newline, so we can just walk backwards
-	// until we stop seeing comment tokens.
-	for i := len(toks) - 1; i >= 0; i-- {
-		if toks[i].Type != hclsyntax.TokenComment {
-			return i + 1
-		}
-	}
-	return 0
-}
-
-// partitionLineEndTokens takes a sequence of tokens that is assumed
-// to immediately follow a construct that can have a line comment, and
-// returns first the index where any line comments end and then second
-// the index immediately after the trailing newline.
-//
-// Line comments are defined as comments that appear immediately after
-// a construct on the same line where its significant tokens ended.
-//
-// Since single-line comment tokens (# and //) include the newline that
-// terminates them, in the presence of these the two returned indices
-// will be the same since the comment itself serves as the line end.
-func partitionLineEndTokens(toks hclsyntax.Tokens) (afterComment, afterNewline int) {
-	for i := 0; i < len(toks); i++ {
-		tok := toks[i]
-		if tok.Type != hclsyntax.TokenComment {
-			switch tok.Type {
-			case hclsyntax.TokenNewline:
-				return i, i + 1
-			case hclsyntax.TokenEOF:
-				// Although this is valid, we mustn't include the EOF
-				// itself as our "newline" or else strange things will
-				// happen when we try to append new items.
-				return i, i
-			default:
-				// If we have well-formed input here then nothing else should be
-				// possible. This path should never happen, because we only try
-				// to extract tokens from the sequence if the parser succeeded,
-				// and it should catch this problem itself.
-				panic("malformed line trailers: expected only comments and newlines")
-			}
-		}
-
-		if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
-			// Newline at the end of a single-line comment serves both as
-			// the end of comments *and* the end of the line.
-			return i + 1, i + 1
-		}
-	}
-	return len(toks), len(toks)
-}
-
-// lexConfig uses the hclsyntax scanner to get a token stream and then
-// rewrites it into this package's token model.
-//
-// Any errors produced during scanning are ignored, so the results of this
-// function should be used with care.
-func lexConfig(src []byte) Tokens {
-	mainTokens, _ := hclsyntax.LexConfig(src, "", hcl.Pos{Byte: 0, Line: 1, Column: 1})
-	return writerTokens(mainTokens)
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go
deleted file mode 100644
index 678a3aa4..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package hclwrite
-
-import (
-	"bytes"
-
-	"github.com/hashicorp/hcl/v2"
-)
-
-// NewFile creates a new file object that is empty and ready to have constructs
-// added to it.
-func NewFile() *File {
-	body := &Body{
-		inTree: newInTree(),
-		items:  newNodeSet(),
-	}
-	file := &File{
-		inTree: newInTree(),
-	}
-	file.body = file.inTree.children.Append(body)
-	return file
-}
-
-// ParseConfig interprets the given source bytes into a *hclwrite.File. The
-// resulting AST can be used to perform surgical edits on the source code
-// before turning it back into bytes again.
-func ParseConfig(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) {
-	return parse(src, filename, start)
-}
-
-// Format takes source code and performs simple whitespace changes to transform
-// it to a canonical layout style.
-// -// Format skips constructing an AST and works directly with tokens, so it -// is less expensive than formatting via the AST for situations where no other -// changes will be made. It also ignores syntax errors and can thus be applied -// to partial source code, although the result in that case may not be -// desirable. -func Format(src []byte) []byte { - tokens := lexConfig(src) - format(tokens) - buf := &bytes.Buffer{} - tokens.WriteTo(buf) - return buf.Bytes() -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go deleted file mode 100644 index 57a5fd2b..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go +++ /dev/null @@ -1,132 +0,0 @@ -package hclwrite - -import ( - "bytes" - "io" - - "github.com/apparentlymart/go-textseg/v13/textseg" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// Token is a single sequence of bytes annotated with a type. It is similar -// in purpose to hclsyntax.Token, but discards the source position information -// since that is not useful in code generation. -type Token struct { - Type hclsyntax.TokenType - Bytes []byte - - // We record the number of spaces before each token so that we can - // reproduce the exact layout of the original file when we're making - // surgical changes in-place. When _new_ code is created it will always - // be in the canonical style, but we preserve layout of existing code. - SpacesBefore int -} - -// asHCLSyntax returns the receiver expressed as an incomplete hclsyntax.Token. -// A complete token is not possible since we don't have source location -// information here, and so this method is unexported so we can be sure it will -// only be used for internal purposes where we know the range isn't important. -// -// This is primarily intended to allow us to re-use certain functionality from -// hclsyntax rather than re-implementing it against our own token type here. -func (t *Token) asHCLSyntax() hclsyntax.Token { - return hclsyntax.Token{ - Type: t.Type, - Bytes: t.Bytes, - Range: hcl.Range{ - Filename: "", - }, - } -} - -// Tokens is a flat list of tokens. -type Tokens []*Token - -func (ts Tokens) Bytes() []byte { - buf := &bytes.Buffer{} - ts.WriteTo(buf) - return buf.Bytes() -} - -func (ts Tokens) testValue() string { - return string(ts.Bytes()) -} - -// Columns returns the number of columns (grapheme clusters) the token sequence -// occupies. The result is not meaningful if there are newline or single-line -// comment tokens in the sequence. -func (ts Tokens) Columns() int { - ret := 0 - for _, token := range ts { - ret += token.SpacesBefore // spaces are always worth one column each - ct, _ := textseg.TokenCount(token.Bytes, textseg.ScanGraphemeClusters) - ret += ct - } - return ret -} - -// WriteTo takes an io.Writer and writes the bytes for each token to it, -// along with the spacing that separates each token. In other words, this -// allows serializing the tokens to a file or other such byte stream. -func (ts Tokens) WriteTo(wr io.Writer) (int64, error) { - // We know we're going to be writing a lot of small chunks of repeated - // space characters, so we'll prepare a buffer of these that we can - // easily pass to wr.Write without any further allocation. 
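Format, declared in public.go above, ties the pieces together: lexConfig produces writer tokens, format adjusts their SpacesBefore fields, and WriteTo (below) serializes those fields back out as runs of spaces. A tiny sketch; per the doc comment it tolerates even partial or invalid source:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclwrite"
)

func main() {
	messy := []byte("a   =   1\n")
	fmt.Print(string(hclwrite.Format(messy))) // a = 1
}
```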
-	spaces := make([]byte, 40)
-	for i := range spaces {
-		spaces[i] = ' '
-	}
-
-	var n int64
-	var err error
-	for _, token := range ts {
-		if err != nil {
-			return n, err
-		}
-
-		for spacesBefore := token.SpacesBefore; spacesBefore > 0; spacesBefore -= len(spaces) {
-			thisChunk := spacesBefore
-			if thisChunk > len(spaces) {
-				thisChunk = len(spaces)
-			}
-			var thisN int
-			thisN, err = wr.Write(spaces[:thisChunk])
-			n += int64(thisN)
-			if err != nil {
-				return n, err
-			}
-		}
-
-		var thisN int
-		thisN, err = wr.Write(token.Bytes)
-		n += int64(thisN)
-	}
-
-	return n, err
-}
-
-func (ts Tokens) walkChildNodes(w internalWalkFunc) {
-	// Unstructured tokens have no child nodes
-}
-
-func (ts Tokens) BuildTokens(to Tokens) Tokens {
-	return append(to, ts...)
-}
-
-// ObjectAttrTokens represents the raw tokens for the name and value of
-// one attribute in an object constructor expression.
-//
-// This is defined primarily for use with function TokensForObject. See
-// that function's documentation for more information.
-type ObjectAttrTokens struct {
-	Name  Tokens
-	Value Tokens
-}
-
-func newIdentToken(name string) *Token {
-	return &Token{
-		Type:  hclsyntax.TokenIdent,
-		Bytes: []byte(name),
-	}
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/json/ast.go b/vendor/github.com/hashicorp/hcl/v2/json/ast.go
deleted file mode 100644
index 9c580ca3..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/json/ast.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package json
-
-import (
-	"math/big"
-
-	"github.com/hashicorp/hcl/v2"
-)
-
-type node interface {
-	Range() hcl.Range
-	StartRange() hcl.Range
-}
-
-type objectVal struct {
-	Attrs      []*objectAttr
-	SrcRange   hcl.Range // range of the entire object, brace-to-brace
-	OpenRange  hcl.Range // range of the opening brace
-	CloseRange hcl.Range // range of the closing brace
-}
-
-func (n *objectVal) Range() hcl.Range {
-	return n.SrcRange
-}
-
-func (n *objectVal) StartRange() hcl.Range {
-	return n.OpenRange
-}
-
-type objectAttr struct {
-	Name      string
-	Value     node
-	NameRange hcl.Range // range of the name string
-}
-
-func (n *objectAttr) Range() hcl.Range {
-	return n.NameRange
-}
-
-func (n *objectAttr) StartRange() hcl.Range {
-	return n.NameRange
-}
-
-type arrayVal struct {
-	Values    []node
-	SrcRange  hcl.Range // range of the entire array, bracket-to-bracket
-	OpenRange hcl.Range // range of the opening bracket
-}
-
-func (n *arrayVal) Range() hcl.Range {
-	return n.SrcRange
-}
-
-func (n *arrayVal) StartRange() hcl.Range {
-	return n.OpenRange
-}
-
-type booleanVal struct {
-	Value    bool
-	SrcRange hcl.Range
-}
-
-func (n *booleanVal) Range() hcl.Range {
-	return n.SrcRange
-}
-
-func (n *booleanVal) StartRange() hcl.Range {
-	return n.SrcRange
-}
-
-type numberVal struct {
-	Value    *big.Float
-	SrcRange hcl.Range
-}
-
-func (n *numberVal) Range() hcl.Range {
-	return n.SrcRange
-}
-
-func (n *numberVal) StartRange() hcl.Range {
-	return n.SrcRange
-}
-
-type stringVal struct {
-	Value    string
-	SrcRange hcl.Range
-}
-
-func (n *stringVal) Range() hcl.Range {
-	return n.SrcRange
-}
-
-func (n *stringVal) StartRange() hcl.Range {
-	return n.SrcRange
-}
-
-type nullVal struct {
-	SrcRange hcl.Range
-}
-
-func (n *nullVal) Range() hcl.Range {
-	return n.SrcRange
-}
-
-func (n *nullVal) StartRange() hcl.Range {
-	return n.SrcRange
-}
-
-// invalidVal is used as a placeholder where a value is needed for a valid
-// parse tree but the input was invalid enough to prevent one from being
-// created.
-type invalidVal struct { - SrcRange hcl.Range -} - -func (n invalidVal) Range() hcl.Range { - return n.SrcRange -} - -func (n invalidVal) StartRange() hcl.Range { - return n.SrcRange -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go b/vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go deleted file mode 100644 index fbdd8bff..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go +++ /dev/null @@ -1,33 +0,0 @@ -package json - -import ( - "github.com/agext/levenshtein" -) - -var keywords = []string{"false", "true", "null"} - -// keywordSuggestion tries to find a valid JSON keyword that is close to the -// given string and returns it if found. If no keyword is close enough, returns -// the empty string. -func keywordSuggestion(given string) string { - return nameSuggestion(given, keywords) -} - -// nameSuggestion tries to find a name from the given slice of suggested names -// that is close to the given name and returns it if found. If no suggestion -// is close enough, returns the empty string. -// -// The suggestions are tried in order, so earlier suggestions take precedence -// if the given string is similar to two or more suggestions. -// -// This function is intended to be used with a relatively-small number of -// suggestions. It's not optimized for hundreds or thousands of them. -func nameSuggestion(given string, suggestions []string) string { - for _, suggestion := range suggestions { - dist := levenshtein.Distance(given, suggestion, nil) - if dist < 3 { // threshold determined experimentally - return suggestion - } - } - return "" -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/doc.go b/vendor/github.com/hashicorp/hcl/v2/json/doc.go deleted file mode 100644 index 84d73193..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package json is the JSON parser for HCL. It parses JSON files and returns -// implementations of the core HCL structural interfaces in terms of the -// JSON data inside. -// -// This is not a generic JSON parser. Instead, it deals with the mapping from -// the JSON information model to the HCL information model, using a number -// of hard-coded structural conventions. -// -// In most cases applications will not import this package directly, but will -// instead access its functionality indirectly through functions in the main -// "hcl" package and in the "hclparse" package. -package json diff --git a/vendor/github.com/hashicorp/hcl/v2/json/is.go b/vendor/github.com/hashicorp/hcl/v2/json/is.go deleted file mode 100644 index 73c6775f..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/is.go +++ /dev/null @@ -1,54 +0,0 @@ -package json - -import ( - "github.com/hashicorp/hcl/v2" -) - -// IsJSONExpression returns true if and only if the given expression is one -// that originated in a JSON document. -// -// Applications aiming to be syntax-agnostic should not use this function and -// should instead use the normal expression evaluation or static analysis -// APIs. 
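The did-you-mean heuristic above is a straight edit-distance threshold. A standalone sketch of the same idea, mirroring nameSuggestion with its experimentally chosen cutoff of 3:

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

// closest mirrors nameSuggestion: the first suggestion within an edit
// distance of 3 wins, with earlier entries taking precedence.
func closest(given string, suggestions []string) string {
	for _, s := range suggestions {
		if levenshtein.Distance(given, s, nil) < 3 {
			return s
		}
	}
	return ""
}

func main() {
	fmt.Println(closest("flase", []string{"false", "true", "null"})) // false
}
```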
-//
-// However, JSON expressions do have a unique behavior whereby they interpret
-// the source JSON differently depending on the hcl.EvalContext value passed
-// to the Value method -- in particular, a nil hcl.EvalContext returns
-// literal strings rather than interpreting them as HCL template syntax --
-// and so in exceptional cases an application may wish to rely on that behavior
-// in situations where it specifically knows the expression originated in JSON,
-// in case it needs to do some non-standard handling of the expression in that
-// case.
-//
-// Caution: The normal HCL API allows for HCL expression implementations that
-// wrap other HCL expression implementations. This function will return false
-// if given an expression of some other type that encapsulates a JSON
-// expression, even if the wrapper implementation would in principle preserve
-// the special evaluation behavior of the wrapped expression.
-func IsJSONExpression(maybeJSONExpr hcl.Expression) bool {
-	_, ok := maybeJSONExpr.(*expression)
-	return ok
-}
-
-// IsJSONBody returns true if and only if the given body is one that originated
-// in a JSON document.
-//
-// Applications aiming to be syntax-agnostic should not use this function and
-// should instead use the normal schema-driven or "just attributes" decoding
-// APIs.
-//
-// However, JSON bodies do have a unique behavior whereby various different
-// source JSON shapes can be interpreted in different ways depending on the
-// given schema, and so in exceptional cases an application may need to
-// perform some deeper analysis first in order to distinguish variants of
-// different physical structure.
-//
-// Caution: The normal HCL API allows for HCL body implementations that wrap
-// other HCL body implementations. This function will return false if given an
-// expression of some other type that encapsulates a JSON body, even if
-// the wrapper implementation would in principle preserve the special
-// decoding behavior of the wrapped body.
-func IsJSONBody(maybeJSONBody hcl.Body) bool {
-	_, ok := maybeJSONBody.(*body)
-	return ok
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/json/navigation.go b/vendor/github.com/hashicorp/hcl/v2/json/navigation.go
deleted file mode 100644
index bc8a97f7..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/json/navigation.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package json
-
-import (
-	"fmt"
-	"strings"
-)
-
-type navigation struct {
-	root node
-}
-
-// Implementation of hcled.ContextString
-func (n navigation) ContextString(offset int) string {
-	steps := navigationStepsRev(n.root, offset)
-	if steps == nil {
-		return ""
-	}
-
-	// We built our slice backwards, so we'll reverse it in-place now.
-	half := len(steps) / 2 // integer division
-	for i := 0; i < half; i++ {
-		steps[i], steps[len(steps)-1-i] = steps[len(steps)-1-i], steps[i]
-	}
-
-	ret := strings.Join(steps, "")
-	if len(ret) > 0 && ret[0] == '.' {
-		ret = ret[1:]
-	}
-	return ret
-}
-
-func navigationStepsRev(v node, offset int) []string {
-	switch tv := v.(type) {
-	case *objectVal:
-		// Do any of our properties have an object that contains the target
-		// offset?
-		for _, attr := range tv.Attrs {
-			k := attr.Name
-			av := attr.Value
-
-			switch av.(type) {
-			case *objectVal, *arrayVal:
-				// okay
-			default:
-				continue
-			}
-
-			if av.Range().ContainsOffset(offset) {
-				return append(navigationStepsRev(av, offset), "."+k)
-			}
-		}
-	case *arrayVal:
-		// Do any of our elements contain the target offset?
- for i, elem := range tv.Values { - - switch elem.(type) { - case *objectVal, *arrayVal: - // okay - default: - continue - } - - if elem.Range().ContainsOffset(offset) { - return append(navigationStepsRev(elem, offset), fmt.Sprintf("[%d]", i)) - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/parser.go b/vendor/github.com/hashicorp/hcl/v2/json/parser.go deleted file mode 100644 index 6b7420b9..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/parser.go +++ /dev/null @@ -1,504 +0,0 @@ -package json - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -func parseFileContent(buf []byte, filename string, start hcl.Pos) (node, hcl.Diagnostics) { - tokens := scan(buf, pos{Filename: filename, Pos: start}) - p := newPeeker(tokens) - node, diags := parseValue(p) - if len(diags) == 0 && p.Peek().Type != tokenEOF { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous data after value", - Detail: "Extra characters appear after the JSON value.", - Subject: p.Peek().Range.Ptr(), - }) - } - return node, diags -} - -func parseExpression(buf []byte, filename string, start hcl.Pos) (node, hcl.Diagnostics) { - tokens := scan(buf, pos{Filename: filename, Pos: start}) - p := newPeeker(tokens) - node, diags := parseValue(p) - if len(diags) == 0 && p.Peek().Type != tokenEOF { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous data after value", - Detail: "Extra characters appear after the JSON value.", - Subject: p.Peek().Range.Ptr(), - }) - } - return node, diags -} - -func parseValue(p *peeker) (node, hcl.Diagnostics) { - tok := p.Peek() - - wrapInvalid := func(n node, diags hcl.Diagnostics) (node, hcl.Diagnostics) { - if n != nil { - return n, diags - } - return invalidVal{tok.Range}, diags - } - - switch tok.Type { - case tokenBraceO: - return wrapInvalid(parseObject(p)) - case tokenBrackO: - return wrapInvalid(parseArray(p)) - case tokenNumber: - return wrapInvalid(parseNumber(p)) - case tokenString: - return wrapInvalid(parseString(p)) - case tokenKeyword: - return wrapInvalid(parseKeyword(p)) - case tokenBraceC: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Missing JSON value", - Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", - Subject: &tok.Range, - }, - }) - case tokenBrackC: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Missing array element value", - Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", - Subject: &tok.Range, - }, - }) - case tokenEOF: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Missing value", - Detail: "The JSON data ends prematurely.", - Subject: &tok.Range, - }, - }) - default: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid start of value", - Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", - Subject: &tok.Range, - }, - }) - } -} - -func tokenCanStartValue(tok token) bool { - switch tok.Type { - case tokenBraceO, tokenBrackO, tokenNumber, tokenString, tokenKeyword: - return true - default: - return false - } -} - -func parseObject(p *peeker) (node, hcl.Diagnostics) { - var diags hcl.Diagnostics - - open := p.Read() - attrs := []*objectAttr{} - - // recover is used to shift the peeker to what seems to be the end of - 
// our object, so that when we encounter an error we leave the peeker - // at a reasonable point in the token stream to continue parsing. - recover := func(tok token) { - open := 1 - for { - switch tok.Type { - case tokenBraceO: - open++ - case tokenBraceC: - open-- - if open <= 1 { - return - } - case tokenEOF: - // Ran out of source before we were able to recover, - // so we'll bail here and let the caller deal with it. - return - } - tok = p.Read() - } - } - -Token: - for { - if p.Peek().Type == tokenBraceC { - break Token - } - - keyNode, keyDiags := parseValue(p) - diags = diags.Extend(keyDiags) - if keyNode == nil { - return nil, diags - } - - keyStrNode, ok := keyNode.(*stringVal) - if !ok { - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid object property name", - Detail: "A JSON object property name must be a string", - Subject: keyNode.StartRange().Ptr(), - }) - } - - key := keyStrNode.Value - - colon := p.Read() - if colon.Type != tokenColon { - recover(colon) - - if colon.Type == tokenBraceC || colon.Type == tokenComma { - // Catch common mistake of using braces instead of brackets - // for an object. - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing object value", - Detail: "A JSON object attribute must have a value, introduced by a colon.", - Subject: &colon.Range, - }) - } - - if colon.Type == tokenEquals { - // Possible confusion with native HCL syntax. - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing property value colon", - Detail: "JSON uses a colon as its name/value delimiter, not an equals sign.", - Subject: &colon.Range, - }) - } - - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing property value colon", - Detail: "A colon must appear between an object property's name and its value.", - Subject: &colon.Range, - }) - } - - valNode, valDiags := parseValue(p) - diags = diags.Extend(valDiags) - if valNode == nil { - return nil, diags - } - - attrs = append(attrs, &objectAttr{ - Name: key, - Value: valNode, - NameRange: keyStrNode.SrcRange, - }) - - switch p.Peek().Type { - case tokenComma: - comma := p.Read() - if p.Peek().Type == tokenBraceC { - // Special error message for this common mistake - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Trailing comma in object", - Detail: "JSON does not permit a trailing comma after the final property in an object.", - Subject: &comma.Range, - }) - } - continue Token - case tokenEOF: - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unclosed object", - Detail: "No closing brace was found for this JSON object.", - Subject: &open.Range, - }) - case tokenBrackC: - // Consume the bracket anyway, so that we don't return with the peeker - // at a strange place. 
-			p.Read()
-			return nil, diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Mismatched braces",
-				Detail:   "A JSON object must be closed with a brace, not a bracket.",
-				Subject:  p.Peek().Range.Ptr(),
-			})
-		case tokenBraceC:
-			break Token
-		default:
-			recover(p.Read())
-			return nil, diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Missing attribute separator comma",
-				Detail:   "A comma must appear between each property definition in an object.",
-				Subject:  p.Peek().Range.Ptr(),
-			})
-		}
-
-	}
-
-	close := p.Read()
-	return &objectVal{
-		Attrs:      attrs,
-		SrcRange:   hcl.RangeBetween(open.Range, close.Range),
-		OpenRange:  open.Range,
-		CloseRange: close.Range,
-	}, diags
-}
-
-func parseArray(p *peeker) (node, hcl.Diagnostics) {
-	var diags hcl.Diagnostics
-
-	open := p.Read()
-	vals := []node{}
-
-	// recover is used to shift the peeker to what seems to be the end of
-	// our array, so that when we encounter an error we leave the peeker
-	// at a reasonable point in the token stream to continue parsing.
-	recover := func(tok token) {
-		open := 1
-		for {
-			switch tok.Type {
-			case tokenBrackO:
-				open++
-			case tokenBrackC:
-				open--
-				if open <= 1 {
-					return
-				}
-			case tokenEOF:
-				// Ran out of source before we were able to recover,
-				// so we'll bail here and let the caller deal with it.
-				return
-			}
-			tok = p.Read()
-		}
-	}
-
-Token:
-	for {
-		if p.Peek().Type == tokenBrackC {
-			break Token
-		}
-
-		valNode, valDiags := parseValue(p)
-		diags = diags.Extend(valDiags)
-		if valNode == nil {
-			return nil, diags
-		}
-
-		vals = append(vals, valNode)
-
-		switch p.Peek().Type {
-		case tokenComma:
-			comma := p.Read()
-			if p.Peek().Type == tokenBrackC {
-				// Special error message for this common mistake
-				return nil, diags.Append(&hcl.Diagnostic{
-					Severity: hcl.DiagError,
-					Summary:  "Trailing comma in array",
-					Detail:   "JSON does not permit a trailing comma after the final value in an array.",
-					Subject:  &comma.Range,
-				})
-			}
-			continue Token
-		case tokenColon:
-			recover(p.Read())
-			return nil, diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Invalid array value",
-				Detail:   "A colon is not used to introduce values in a JSON array.",
-				Subject:  p.Peek().Range.Ptr(),
-			})
-		case tokenEOF:
-			recover(p.Read())
-			return nil, diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Unclosed array",
-				Detail:   "No closing bracket was found for this JSON array.",
-				Subject:  &open.Range,
-			})
-		case tokenBraceC:
-			recover(p.Read())
-			return nil, diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Mismatched brackets",
-				Detail:   "A JSON array must be closed with a bracket, not a brace.",
-				Subject:  p.Peek().Range.Ptr(),
-			})
-		case tokenBrackC:
-			break Token
-		default:
-			recover(p.Read())
-			return nil, diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Missing attribute separator comma",
-				Detail:   "A comma must appear between each value in an array.",
-				Subject:  p.Peek().Range.Ptr(),
-			})
-		}
-
-	}
-
-	close := p.Read()
-	return &arrayVal{
-		Values:    vals,
-		SrcRange:  hcl.RangeBetween(open.Range, close.Range),
-		OpenRange: open.Range,
-	}, diags
-}
-
-func parseNumber(p *peeker) (node, hcl.Diagnostics) {
-	tok := p.Read()
-
-	// Use encoding/json to validate the number syntax.
-	// TODO: Do this more directly to produce better diagnostics.
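From a caller's perspective, all of the recovery logic and tailored messages above surface as ordinary hcl.Diagnostics. A sketch using the package's Parse entry point from public.go further down; note that because the failed root parses as invalidVal, the root-value check in ParseWithStartPos adds a second diagnostic:

```go
package main

import (
	"fmt"

	hcljson "github.com/hashicorp/hcl/v2/json"
)

func main() {
	src := []byte(`{"a": [1, 2,]}`)
	_, diags := hcljson.Parse(src, "config.hcl.json")
	for _, d := range diags {
		// Renders roughly as "file:line,col: Summary; Detail", starting
		// with the "Trailing comma in array" diagnostic from parseArray.
		fmt.Println(d.Error())
	}
}
```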
-	var num json.Number
-	err := json.Unmarshal(tok.Bytes, &num)
-	if err != nil {
-		return nil, hcl.Diagnostics{
-			{
-				Severity: hcl.DiagError,
-				Summary:  "Invalid JSON number",
-				Detail:   "There is a syntax error in the given JSON number.",
-				Subject:  &tok.Range,
-			},
-		}
-	}
-
-	// We want to guarantee that we parse numbers the same way as cty (and thus
-	// native syntax HCL) would here, so we'll use the cty parser even though
-	// in most other cases we don't actually introduce cty concepts until
-	// decoding time. We'll unwrap the parsed float immediately afterwards, so
-	// the cty value is just a temporary helper.
-	nv, err := cty.ParseNumberVal(string(num))
-	if err != nil {
-		// Should never happen if above passed, since JSON numbers are a subset
-		// of what cty can parse...
-		return nil, hcl.Diagnostics{
-			{
-				Severity: hcl.DiagError,
-				Summary:  "Invalid JSON number",
-				Detail:   "There is a syntax error in the given JSON number.",
-				Subject:  &tok.Range,
-			},
-		}
-	}
-
-	return &numberVal{
-		Value:    nv.AsBigFloat(),
-		SrcRange: tok.Range,
-	}, nil
-}
-
-func parseString(p *peeker) (node, hcl.Diagnostics) {
-	tok := p.Read()
-	var str string
-	err := json.Unmarshal(tok.Bytes, &str)
-
-	if err != nil {
-		var errRange hcl.Range
-		if serr, ok := err.(*json.SyntaxError); ok {
-			errOfs := serr.Offset
-			errPos := tok.Range.Start
-			errPos.Byte += int(errOfs)
-
-			// TODO: Use the byte offset to properly count unicode
-			// characters for the column, and mark the whole of the
-			// character that was wrong as part of our range.
-			errPos.Column += int(errOfs)
-
-			errEndPos := errPos
-			errEndPos.Byte++
-			errEndPos.Column++
-
-			errRange = hcl.Range{
-				Filename: tok.Range.Filename,
-				Start:    errPos,
-				End:      errEndPos,
-			}
-		} else {
-			errRange = tok.Range
-		}
-
-		var contextRange *hcl.Range
-		if errRange != tok.Range {
-			contextRange = &tok.Range
-		}
-
-		// FIXME: Eventually we should parse strings directly here so
-		// we can produce a more useful error message in the face of things
-		// such as invalid escapes, etc.
-		return nil, hcl.Diagnostics{
-			{
-				Severity: hcl.DiagError,
-				Summary:  "Invalid JSON string",
-				Detail:   "There is a syntax error in the given JSON string.",
-				Subject:  &errRange,
-				Context:  contextRange,
-			},
-		}
-	}
-
-	return &stringVal{
-		Value:    str,
-		SrcRange: tok.Range,
-	}, nil
-}
-
-func parseKeyword(p *peeker) (node, hcl.Diagnostics) {
-	tok := p.Read()
-	s := string(tok.Bytes)
-
-	switch s {
-	case "true":
-		return &booleanVal{
-			Value:    true,
-			SrcRange: tok.Range,
-		}, nil
-	case "false":
-		return &booleanVal{
-			Value:    false,
-			SrcRange: tok.Range,
-		}, nil
-	case "null":
-		return &nullVal{
-			SrcRange: tok.Range,
-		}, nil
-	case "undefined", "NaN", "Infinity":
-		return nil, hcl.Diagnostics{
-			{
-				Severity: hcl.DiagError,
-				Summary:  "Invalid JSON keyword",
-				Detail:   fmt.Sprintf("The JavaScript identifier %q cannot be used in JSON.", s),
-				Subject:  &tok.Range,
-			},
-		}
-	default:
-		var dym string
-		if suggest := keywordSuggestion(s); suggest != "" {
-			dym = fmt.Sprintf(" Did you mean %q?", suggest)
-		}
-
-		return nil, hcl.Diagnostics{
-			{
-				Severity: hcl.DiagError,
-				Summary:  "Invalid JSON keyword",
-				Detail:   fmt.Sprintf("%q is not a valid JSON keyword.%s", s, dym),
-				Subject:  &tok.Range,
-			},
-		}
-	}
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/json/peeker.go b/vendor/github.com/hashicorp/hcl/v2/json/peeker.go
deleted file mode 100644
index fc7bbf58..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/json/peeker.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package json
-
-type peeker struct {
-	tokens []token
-	pos    int
-}
-
-func newPeeker(tokens []token) *peeker {
-	return &peeker{
-		tokens: tokens,
-		pos:    0,
-	}
-}
-
-func (p *peeker) Peek() token {
-	return p.tokens[p.pos]
-}
-
-func (p *peeker) Read() token {
-	ret := p.tokens[p.pos]
-	if ret.Type != tokenEOF {
-		p.pos++
-	}
-	return ret
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/json/public.go b/vendor/github.com/hashicorp/hcl/v2/json/public.go
deleted file mode 100644
index d1e4faf5..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/json/public.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package json
-
-import (
-	"fmt"
-	"io/ioutil"
-	"os"
-
-	"github.com/hashicorp/hcl/v2"
-)
-
-// Parse attempts to parse the given buffer as JSON and, if successful, returns
-// a hcl.File for the HCL configuration represented by it.
-//
-// This is not a generic JSON parser. Instead, it deals only with the profile
-// of JSON used to express HCL configuration.
-//
-// The returned file is valid only if the returned diagnostics returns false
-// from its HasErrors method. If HasErrors returns true, the file represents
-// the subset of data that was able to be parsed, which may be none.
-func Parse(src []byte, filename string) (*hcl.File, hcl.Diagnostics) {
-	return ParseWithStartPos(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1})
-}
-
-// ParseWithStartPos attempts to parse like json.Parse, but unlike json.Parse
-// you can pass a start position of the given JSON as a hcl.Pos.
-//
-// In most cases json.Parse should be sufficient, but it can be useful for parsing
-// a part of JSON with correct positions.
-func ParseWithStartPos(src []byte, filename string, start hcl.Pos) (*hcl.File, hcl.Diagnostics) { - rootNode, diags := parseFileContent(src, filename, start) - - switch rootNode.(type) { - case *objectVal, *arrayVal: - // okay - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Root value must be object", - Detail: "The root value in a JSON-based configuration must be either a JSON object or a JSON array of objects.", - Subject: rootNode.StartRange().Ptr(), - }) - - // Since we've already produced an error message for this being - // invalid, we'll return an empty placeholder here so that trying to - // extract content from our root body won't produce a redundant - // error saying the same thing again in more general terms. - fakePos := hcl.Pos{ - Byte: 0, - Line: 1, - Column: 1, - } - fakeRange := hcl.Range{ - Filename: filename, - Start: fakePos, - End: fakePos, - } - rootNode = &objectVal{ - Attrs: []*objectAttr{}, - SrcRange: fakeRange, - OpenRange: fakeRange, - } - } - - file := &hcl.File{ - Body: &body{ - val: rootNode, - }, - Bytes: src, - Nav: navigation{rootNode}, - } - return file, diags -} - -// ParseExpression parses the given buffer as a standalone JSON expression, -// returning it as an instance of Expression. -func ParseExpression(src []byte, filename string) (hcl.Expression, hcl.Diagnostics) { - return ParseExpressionWithStartPos(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1}) -} - -// ParseExpressionWithStartPos parses like json.ParseExpression, but unlike -// json.ParseExpression you can pass a start position of the given JSON -// expression as a hcl.Pos. -func ParseExpressionWithStartPos(src []byte, filename string, start hcl.Pos) (hcl.Expression, hcl.Diagnostics) { - node, diags := parseExpression(src, filename, start) - return &expression{src: node}, diags -} - -// ParseFile is a convenience wrapper around Parse that first attempts to load -// data from the given filename, passing the result to Parse if successful. -// -// If the file cannot be read, an error diagnostic with nil context is returned. 
-func ParseFile(filename string) (*hcl.File, hcl.Diagnostics) {
-	f, err := os.Open(filename)
-	if err != nil {
-		return nil, hcl.Diagnostics{
-			{
-				Severity: hcl.DiagError,
-				Summary:  "Failed to open file",
-				Detail:   fmt.Sprintf("The file %q could not be opened.", filename),
-			},
-		}
-	}
-	defer f.Close()
-
-	src, err := ioutil.ReadAll(f)
-	if err != nil {
-		return nil, hcl.Diagnostics{
-			{
-				Severity: hcl.DiagError,
-				Summary:  "Failed to read file",
-				Detail:   fmt.Sprintf("The file %q was opened, but an error occurred while reading it.", filename),
-			},
-		}
-	}
-
-	return Parse(src, filename)
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/json/scanner.go b/vendor/github.com/hashicorp/hcl/v2/json/scanner.go
deleted file mode 100644
index b7111631..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/json/scanner.go
+++ /dev/null
@@ -1,306 +0,0 @@
-package json
-
-import (
-	"fmt"
-
-	"github.com/apparentlymart/go-textseg/v13/textseg"
-	"github.com/hashicorp/hcl/v2"
-)
-
-//go:generate stringer -type tokenType scanner.go
-type tokenType rune
-
-const (
-	tokenBraceO  tokenType = '{'
-	tokenBraceC  tokenType = '}'
-	tokenBrackO  tokenType = '['
-	tokenBrackC  tokenType = ']'
-	tokenComma   tokenType = ','
-	tokenColon   tokenType = ':'
-	tokenKeyword tokenType = 'K'
-	tokenString  tokenType = 'S'
-	tokenNumber  tokenType = 'N'
-	tokenEOF     tokenType = '␄'
-	tokenInvalid tokenType = 0
-	tokenEquals  tokenType = '=' // used only for reminding the user of JSON syntax
-)
-
-type token struct {
-	Type  tokenType
-	Bytes []byte
-	Range hcl.Range
-}
-
-// scan returns the primary tokens for the given JSON buffer in sequence.
-//
-// The responsibility of this pass is to just mark the slices of the buffer
-// as being of various types. It is lax in how it interprets the multi-byte
-// token types keyword, string and number, preferring to capture erroneous
-// extra bytes that we presume the user intended to be part of the token
-// so that we can generate more helpful diagnostics in the parser.
-func scan(buf []byte, start pos) []token {
-	var tokens []token
-	p := start
-	for {
-		if len(buf) == 0 {
-			tokens = append(tokens, token{
-				Type:  tokenEOF,
-				Bytes: nil,
-				Range: posRange(p, p),
-			})
-			return tokens
-		}
-
-		buf, p = skipWhitespace(buf, p)
-
-		if len(buf) == 0 {
-			tokens = append(tokens, token{
-				Type:  tokenEOF,
-				Bytes: nil,
-				Range: posRange(p, p),
-			})
-			return tokens
-		}
-
-		start = p
-
-		first := buf[0]
-		switch {
-		case first == '{' || first == '}' || first == '[' || first == ']' || first == ',' || first == ':' || first == '=':
-			p.Pos.Column++
-			p.Pos.Byte++
-			tokens = append(tokens, token{
-				Type:  tokenType(first),
-				Bytes: buf[0:1],
-				Range: posRange(start, p),
-			})
-			buf = buf[1:]
-		case first == '"':
-			var tokBuf []byte
-			tokBuf, buf, p = scanString(buf, p)
-			tokens = append(tokens, token{
-				Type:  tokenString,
-				Bytes: tokBuf,
-				Range: posRange(start, p),
-			})
-		case byteCanStartNumber(first):
-			var tokBuf []byte
-			tokBuf, buf, p = scanNumber(buf, p)
-			tokens = append(tokens, token{
-				Type:  tokenNumber,
-				Bytes: tokBuf,
-				Range: posRange(start, p),
-			})
-		case byteCanStartKeyword(first):
-			var tokBuf []byte
-			tokBuf, buf, p = scanKeyword(buf, p)
-			tokens = append(tokens, token{
-				Type:  tokenKeyword,
-				Bytes: tokBuf,
-				Range: posRange(start, p),
-			})
-		default:
-			tokens = append(tokens, token{
-				Type:  tokenInvalid,
-				Bytes: buf[:1],
-				Range: start.Range(1, 1),
-			})
-			// If we've encountered an invalid token then we might as well stop
-			// scanning since the parser won't proceed beyond this point.
- // We insert a synthetic EOF marker here to match the expectations - // of consumers of this data structure. - p.Pos.Column++ - p.Pos.Byte++ - tokens = append(tokens, token{ - Type: tokenEOF, - Bytes: nil, - Range: posRange(p, p), - }) - return tokens - } - } -} - -func byteCanStartNumber(b byte) bool { - switch b { - // We are slightly more tolerant than JSON requires here since we - // expect the parser will make a stricter interpretation of the - // number bytes, but we specifically don't allow 'e' or 'E' here - // since we want the scanner to treat that as the start of an - // invalid keyword instead, to produce more intelligible error messages. - case '-', '+', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - return true - default: - return false - } -} - -func scanNumber(buf []byte, start pos) ([]byte, []byte, pos) { - // The scanner doesn't check that the sequence of digit-ish bytes is - // in a valid order. The parser must do this when decoding a number - // token. - var i int - p := start -Byte: - for i = 0; i < len(buf); i++ { - switch buf[i] { - case '-', '+', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - p.Pos.Byte++ - p.Pos.Column++ - default: - break Byte - } - } - return buf[:i], buf[i:], p -} - -func byteCanStartKeyword(b byte) bool { - switch { - // We allow any sequence of alphabetical characters here, even though - // JSON is more constrained, so that we can collect what we presume - // the user intended to be a single keyword and then check its validity - // in the parser, where we can generate better diagnostics. - // So e.g. we want to be able to say: - // unrecognized keyword "True". Did you mean "true"? - case isAlphabetical(b): - return true - default: - return false - } -} - -func scanKeyword(buf []byte, start pos) ([]byte, []byte, pos) { - var i int - p := start -Byte: - for i = 0; i < len(buf); i++ { - b := buf[i] - switch { - case isAlphabetical(b) || b == '_': - p.Pos.Byte++ - p.Pos.Column++ - default: - break Byte - } - } - return buf[:i], buf[i:], p -} - -func scanString(buf []byte, start pos) ([]byte, []byte, pos) { - // The scanner doesn't validate correct use of escapes, etc. It pays - // attention to escapes only for the purpose of identifying the closing - // quote character. It's the parser's responsibility to do proper - // validation. - // - // The scanner also doesn't specifically detect unterminated string - // literals, though they can be identified in the parser by checking if - // the final byte in a string token is the double-quote character. - - // Skip the opening quote symbol - i := 1 - p := start - p.Pos.Byte++ - p.Pos.Column++ - escaping := false -Byte: - for i < len(buf) { - b := buf[i] - - switch { - case b == '\\': - escaping = !escaping - p.Pos.Byte++ - p.Pos.Column++ - i++ - case b == '"': - p.Pos.Byte++ - p.Pos.Column++ - i++ - if !escaping { - break Byte - } - escaping = false - case b < 32: - break Byte - default: - // Advance by one grapheme cluster, so that we consider each - // grapheme to be a "column". - // Ignoring error because this scanner cannot produce errors. 
-			advance, _, _ := textseg.ScanGraphemeClusters(buf[i:], true)
-
-			p.Pos.Byte += advance
-			p.Pos.Column++
-			i += advance
-
-			escaping = false
-		}
-	}
-	return buf[:i], buf[i:], p
-}
-
-func skipWhitespace(buf []byte, start pos) ([]byte, pos) {
-	var i int
-	p := start
-Byte:
-	for i = 0; i < len(buf); i++ {
-		switch buf[i] {
-		case ' ':
-			p.Pos.Byte++
-			p.Pos.Column++
-		case '\n':
-			p.Pos.Byte++
-			p.Pos.Column = 1
-			p.Pos.Line++
-		case '\r':
-			// For the purpose of line/column counting we consider a
-			// carriage return to take up no space, assuming that it will
-			// be paired up with a newline (on Windows, for example) that
-			// will account for both of them.
-			p.Pos.Byte++
-		case '\t':
-			// We arbitrarily count a tab as if it were two spaces, because
-			// we need to choose _some_ number here. This means any system
-			// that renders code on-screen with markers must itself treat
-			// tabs as a pair of spaces for rendering purposes, or instead
-			// use the byte offset and derive its own column position.
-			p.Pos.Byte++
-			p.Pos.Column += 2
-		default:
-			break Byte
-		}
-	}
-	return buf[i:], p
-}
-
-type pos struct {
-	Filename string
-	Pos      hcl.Pos
-}
-
-func (p *pos) Range(byteLen, charLen int) hcl.Range {
-	start := p.Pos
-	end := p.Pos
-	end.Byte += byteLen
-	end.Column += charLen
-	return hcl.Range{
-		Filename: p.Filename,
-		Start:    start,
-		End:      end,
-	}
-}
-
-func posRange(start, end pos) hcl.Range {
-	return hcl.Range{
-		Filename: start.Filename,
-		Start:    start.Pos,
-		End:      end.Pos,
-	}
-}
-
-func (t token) GoString() string {
-	return fmt.Sprintf("json.token{json.%s, []byte(%q), %#v}", t.Type, t.Bytes, t.Range)
-}
-
-func isAlphabetical(b byte) bool {
-	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z')
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/json/spec.md b/vendor/github.com/hashicorp/hcl/v2/json/spec.md
deleted file mode 100644
index dac5729d..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/json/spec.md
+++ /dev/null
@@ -1,405 +0,0 @@
-# HCL JSON Syntax Specification
-
-This is the specification for the JSON serialization for HCL. HCL is a system
-for defining configuration languages for applications. The HCL information
-model is designed to support multiple concrete syntaxes for configuration,
-and this JSON-based format complements [the native syntax](../hclsyntax/spec.md)
-by being easy to machine-generate, whereas the native syntax is oriented
-towards human authoring and maintenance.
-
-This syntax is defined in terms of JSON as defined in
-[RFC7159](https://tools.ietf.org/html/rfc7159). As such it inherits the JSON
-grammar as-is, and merely defines a specific methodology for interpreting
-JSON constructs into HCL structural elements and expressions.
-
-This mapping is defined such that valid JSON-serialized HCL input can be
-_produced_ using standard JSON implementations in various programming languages.
-_Parsing_ such JSON has some additional constraints beyond what is normally
-supported by JSON parsers, so a specialized parser may be required (see the
-sketch after this list) that is able to:
-
-- Preserve the relative ordering of properties defined in an object.
-- Preserve multiple definitions of the same property name.
-- Preserve numeric values to the precision required by the number type
-  in [the HCL syntax-agnostic information model](../spec.md).
-- Retain source location information for parsed tokens/constructs in order
-  to produce good error messages.
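-
-As a hedged illustration of these requirements, the following sketch (not
-part of this specification) feeds a JSON body with a duplicated property
-name through this package's own `Parse` entry point, which preserves both
-definitions where an off-the-shelf JSON decoder would typically keep only
-the last one. The file name `example.hcl.json` is purely illustrative.
-
-```go
-package main
-
-import (
-	"fmt"
-
-	hcljson "github.com/hashicorp/hcl/v2/json"
-)
-
-func main() {
-	// Two definitions of the "foo" property: a specialized HCL JSON parser
-	// must retain both, in order, rather than collapsing them.
-	src := []byte(`{"foo": {"child_attr": "baz"}, "foo": {"child_attr": "boz"}}`)
-
-	file, diags := hcljson.Parse(src, "example.hcl.json")
-	if diags.HasErrors() {
-		fmt.Println(diags.Error())
-		return
-	}
-	fmt.Printf("parsed %d bytes of JSON-serialized HCL\n", len(file.Bytes))
-}
-```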
-
-## Structural Elements
-
-[The HCL syntax-agnostic information model](../spec.md) defines a _body_ as an
-abstract container for attribute definitions and child blocks. A body is
-represented in JSON as either a single JSON object or a JSON array of objects.
-
-Body processing is in terms of JSON object properties, visited in the order
-they appear in the input. Where a body is represented by a single JSON object,
-the properties of that object are visited in order. Where a body is
-represented by a JSON array, each of its elements is visited in order and
-each element has its properties visited in order. If any element of the array
-is not a JSON object then the input is erroneous.
-
-When a body is being processed in the _dynamic attributes_ mode, the allowance
-of a JSON array in the previous paragraph does not apply and instead a single
-JSON object is always required.
-
-As defined in the language-agnostic model, body processing is in terms
-of a schema which provides context for interpreting the body's content. For
-JSON bodies, the schema is crucial to allow differentiation of attribute
-definitions and block definitions, both of which are represented via object
-properties.
-
-The special property name `"//"`, when used in an object representing a HCL
-body, is parsed and ignored. A property with this name can be used to
-include human-readable comments. (This special property name is _not_
-processed in this way for any _other_ HCL constructs that are represented as
-JSON objects.)
-
-### Attributes
-
-Where the given schema describes an attribute with a given name, the object
-property with the matching name — if present — serves as the attribute's
-definition.
-
-When a body is being processed in the _dynamic attributes_ mode, each object
-property serves as an attribute definition for the attribute whose name
-matches the property name.
-
-The value of an attribute definition property is interpreted as an _expression_,
-as described in a later section.
-
-Given a schema that calls for an attribute named "foo", a JSON object like
-the following provides a definition for that attribute:
-
-```json
-{
-  "foo": "bar baz"
-}
-```
-
-### Blocks
-
-Where the given schema describes a block with a given type name, each object
-property with the matching name serves as a definition of zero or more blocks
-of that type.
-
-Processing of child blocks is in terms of nested JSON objects and arrays.
-If the schema defines one or more _labels_ for the block type, a nested JSON
-object or JSON array of objects is required for each labelling level. These
-are flattened to a single ordered sequence of object properties using the
-same algorithm as for body content as defined above. Each object property
-serves as a label value at the corresponding level.
-
-After any labelling levels, the next nested value is either a JSON object
-representing a single block body, or a JSON array of JSON objects that each
-represent a single block body. Use of an array accommodates the definition
-of multiple blocks that have identical type and labels.
-
-Given a schema that calls for a block type named "foo" with no labels, the
-following JSON objects are all valid definitions of zero or more blocks of this
-type:
-
-```json
-{
-  "foo": {
-    "child_attr": "baz"
-  }
-}
-```
-
-```json
-{
-  "foo": [
-    {
-      "child_attr": "baz"
-    },
-    {
-      "child_attr": "boz"
-    }
-  ]
-}
-```
-
-```json
-{
-  "foo": []
-}
-```
-
-The first of these defines a single child block of type "foo". The second
-defines _two_ such blocks.
The final example shows a degenerate definition -of zero blocks, though generators should prefer to omit the property entirely -in this scenario. - -Given a schema that calls for a block type named "foo" with _two_ labels, the -extra label levels must be represented as objects or arrays of objects as in -the following examples: - -```json -{ - "foo": { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - }, - "boz": { - "baz": { - "child_attr": "baz" - } - } - } -} -``` - -```json -{ - "foo": { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - }, - "boz": { - "baz": [ - { - "child_attr": "baz" - }, - { - "child_attr": "boz" - } - ] - } - } -} -``` - -```json -{ - "foo": [ - { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - } - }, - { - "bar": { - "baz": [ - { - "child_attr": "baz" - }, - { - "child_attr": "boz" - } - ] - } - } - ] -} -``` - -```json -{ - "foo": { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - }, - "bar": { - "baz": [ - { - "child_attr": "baz" - }, - { - "child_attr": "boz" - } - ] - } - } -} -``` - -Arrays can be introduced at either the label definition or block body -definition levels to define multiple definitions of the same block type -or labels while preserving order. - -A JSON HCL parser _must_ support duplicate definitions of the same property -name within a single object, preserving all of them and the relative ordering -between them. The array-based forms are also required so that JSON HCL -configurations can be produced with JSON producing libraries that are not -able to preserve property definition order and multiple definitions of -the same property. - -## Expressions - -JSON lacks a native expression syntax, so the HCL JSON syntax instead defines -a mapping for each of the JSON value types, including a special mapping for -strings that allows optional use of arbitrary expressions. - -### Objects - -When interpreted as an expression, a JSON object represents a value of a HCL -object type. - -Each property of the JSON object represents an attribute of the HCL object type. -The property name string given in the JSON input is interpreted as a string -expression as described below, and its result is converted to string as defined -by the syntax-agnostic information model. If such a conversion is not possible, -an error is produced and evaluation fails. - -An instance of the constructed object type is then created, whose values -are interpreted by again recursively applying the mapping rules defined in -this section to each of the property values. - -If any evaluated property name strings produce null values, an error is -produced and evaluation fails. If any produce _unknown_ values, the _entire -object's_ result is an unknown value of the dynamic pseudo-type, signalling -that the type of the object cannot be determined. - -It is an error to define the same property name multiple times within a single -JSON object interpreted as an expression. In full expression mode, this -constraint applies to the name expression results after conversion to string, -rather than the raw string that may contain interpolation expressions. - -### Arrays - -When interpreted as an expression, a JSON array represents a value of a HCL -tuple type. - -Each element of the JSON array represents an element of the HCL tuple type. 
-The tuple type is constructed by enumerating the JSON array elements, creating
-for each an element whose type is the result of recursively applying the
-expression mapping rules. Correspondence is preserved between the array element
-indices and the tuple element indices.
-
-An instance of the constructed tuple type is then created, whose values are
-interpreted by again recursively applying the mapping rules defined in this
-section.
-
-### Numbers
-
-When interpreted as an expression, a JSON number represents a HCL number value.
-
-HCL numbers are arbitrary-precision decimal values, so a JSON HCL parser must
-be able to translate exactly the value given to a number of corresponding
-precision, within the constraints set by the HCL syntax-agnostic information
-model.
-
-In practice, off-the-shelf JSON serializers often do not support customizing the
-processing of numbers, and instead force processing as 32-bit or 64-bit
-floating point values.
-
-A _producer_ of JSON HCL that uses such a serializer can provide numeric values
-as JSON strings where they have precision too great for representation in the
-serializer's chosen numeric type in situations where the result will be
-converted to number (using the standard conversion rules) by a calling
-application.
-
-Alternatively, for expressions that are evaluated in full expression mode an
-embedded template interpolation can be used to faithfully represent a number,
-such as `"${1e150}"`, which will then be evaluated by the underlying HCL native
-syntax expression evaluator.
-
-### Boolean Values
-
-The JSON boolean values `true` and `false`, when interpreted as expressions,
-represent the corresponding HCL boolean values.
-
-### The Null Value
-
-The JSON value `null`, when interpreted as an expression, represents a
-HCL null value of the dynamic pseudo-type.
-
-### Strings
-
-When interpreted as an expression, a JSON string may be interpreted in one of
-two ways depending on the evaluation mode.
-
-If evaluating in literal-only mode (as defined by the syntax-agnostic
-information model) the literal string is interpreted directly as a HCL string
-value, by directly using the exact sequence of unicode characters represented.
-Template interpolations and directives MUST NOT be processed in this mode,
-allowing any characters that appear as introduction sequences to pass through
-literally:
-
-```json
-"Hello world! Template sequences like ${ are not interpreted here."
-```
-
-When evaluating in full expression mode (again, as defined by the
-syntax-agnostic information model) the literal string is instead interpreted
-as a _standalone template_ in the HCL Native Syntax. The expression evaluation
-result is then the direct result of evaluating that template with the current
-variable scope and function table.
-
-```json
-"Hello, ${name}! Template sequences are interpreted in full expression mode."
-```
-
-In particular the _Template Interpolation Unwrapping_ requirement from the
-HCL native syntax specification must be implemented, allowing the use of
-single-interpolation templates to represent expressions that would not
-otherwise be representable in JSON, such as the following example where
-the result must be a number, rather than a string representation of a number:
-
-```json
-"${ a + b }"
-```
-
-## Static Analysis
-
-The HCL static analysis operations are implemented for JSON values that
-represent expressions, as described in the following sections.
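-
-Before detailing the individual operations, here is a hedged sketch (not
-part of this specification) of how one of them surfaces through the public
-API declared elsewhere in this package: a JSON array parsed with
-`ParseExpression` can be decomposed with `hcl.ExprList` without evaluating
-anything. The file name `static.json` is purely illustrative.
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/hcl/v2"
-	hcljson "github.com/hashicorp/hcl/v2/json"
-)
-
-func main() {
-	// A JSON array interpreted under the "static list" analysis: each
-	// element comes back as its own unevaluated expression.
-	expr, diags := hcljson.ParseExpression([]byte(`["a", "b", "c"]`), "static.json")
-	if diags.HasErrors() {
-		fmt.Println(diags.Error())
-		return
-	}
-
-	exprs, listDiags := hcl.ExprList(expr)
-	if listDiags.HasErrors() {
-		fmt.Println(listDiags.Error())
-		return
-	}
-	fmt.Printf("static list with %d element expressions\n", len(exprs))
-}
-```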
-
-Due to the limited expressive power of the JSON syntax alone, these static
-analysis functions are used, rather than normal expression evaluation, to
-provide additional context for how a JSON value is to be interpreted, which
-means that static analyses can result in a different interpretation of a given
-expression than normal evaluation.
-
-### Static List
-
-An expression interpreted as a static list must be a JSON array. Each of the
-values in the array is interpreted as an expression and returned.
-
-### Static Map
-
-An expression interpreted as a static map must be a JSON object. Each of the
-key/value pairs in the object is presented as a pair of expressions. Since
-object property names are always strings, evaluating the key expression with
-a non-`nil` evaluation context will evaluate any template sequences given
-in the property name.
-
-### Static Call
-
-An expression interpreted as a static call must be a string. The content of
-the string is interpreted as a native syntax expression (not a _template_,
-unlike normal evaluation) and then the static call analysis is delegated to
-that expression.
-
-If the original expression is not a string or its contents cannot be parsed
-as a native syntax expression then static call analysis is not supported.
-
-### Static Traversal
-
-An expression interpreted as a static traversal must be a string. The content
-of the string is interpreted as a native syntax expression (not a _template_,
-unlike normal evaluation) and then static traversal analysis is delegated
-to that expression.
-
-If the original expression is not a string or its contents cannot be parsed
-as a native syntax expression then static traversal analysis is not supported.
diff --git a/vendor/github.com/hashicorp/hcl/v2/json/structure.go b/vendor/github.com/hashicorp/hcl/v2/json/structure.go
deleted file mode 100644
index 76c9d739..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/json/structure.go
+++ /dev/null
@@ -1,637 +0,0 @@
-package json
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/hcl/v2"
-	"github.com/hashicorp/hcl/v2/hclsyntax"
-	"github.com/zclconf/go-cty/cty"
-	"github.com/zclconf/go-cty/cty/convert"
-)
-
-// body is the implementation of "Body" used for files processed with the JSON
-// parser.
-type body struct {
-	val node
-
-	// If non-nil, the keys of this map cause the corresponding attributes to
-	// be treated as non-existing. This is used when Body.PartialContent is
-	// called, to produce the "remaining content" Body.
-	hiddenAttrs map[string]struct{}
-}
-
-// expression is the implementation of "Expression" used for files processed
-// with the JSON parser.
-type expression struct {
-	src node
-}
-
-func (b *body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
-	content, newBody, diags := b.PartialContent(schema)
-
-	hiddenAttrs := newBody.(*body).hiddenAttrs
-
-	var nameSuggestions []string
-	for _, attrS := range schema.Attributes {
-		if _, ok := hiddenAttrs[attrS.Name]; !ok {
-			// Only suggest an attribute name if we didn't use it already.
-			nameSuggestions = append(nameSuggestions, attrS.Name)
-		}
-	}
-	for _, blockS := range schema.Blocks {
-		// Blocks can appear multiple times, so we'll suggest their type
-		// names regardless of whether they've already been used.
-		nameSuggestions = append(nameSuggestions, blockS.Type)
-	}
-
-	jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil)
-	diags = append(diags, attrDiags...)
- - for _, attr := range jsonAttrs { - k := attr.Name - if k == "//" { - // Ignore "//" keys in objects representing bodies, to allow - // their use as comments. - continue - } - - if _, ok := hiddenAttrs[k]; !ok { - suggestion := nameSuggestion(k, nameSuggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous JSON object property", - Detail: fmt.Sprintf("No argument or block type is named %q.%s", k, suggestion), - Subject: &attr.NameRange, - Context: attr.Range().Ptr(), - }) - } - } - - return content, diags -} - -func (b *body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - var diags hcl.Diagnostics - - jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) - diags = append(diags, attrDiags...) - - usedNames := map[string]struct{}{} - if b.hiddenAttrs != nil { - for k := range b.hiddenAttrs { - usedNames[k] = struct{}{} - } - } - - content := &hcl.BodyContent{ - Attributes: map[string]*hcl.Attribute{}, - Blocks: nil, - - MissingItemRange: b.MissingItemRange(), - } - - // Create some more convenient data structures for our work below. - attrSchemas := map[string]hcl.AttributeSchema{} - blockSchemas := map[string]hcl.BlockHeaderSchema{} - for _, attrS := range schema.Attributes { - attrSchemas[attrS.Name] = attrS - } - for _, blockS := range schema.Blocks { - blockSchemas[blockS.Type] = blockS - } - - for _, jsonAttr := range jsonAttrs { - attrName := jsonAttr.Name - if _, used := b.hiddenAttrs[attrName]; used { - continue - } - - if attrS, defined := attrSchemas[attrName]; defined { - if existing, exists := content.Attributes[attrName]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate argument", - Detail: fmt.Sprintf("The argument %q was already set at %s.", attrName, existing.Range), - Subject: &jsonAttr.NameRange, - Context: jsonAttr.Range().Ptr(), - }) - continue - } - - content.Attributes[attrS.Name] = &hcl.Attribute{ - Name: attrS.Name, - Expr: &expression{src: jsonAttr.Value}, - Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), - NameRange: jsonAttr.NameRange, - } - usedNames[attrName] = struct{}{} - - } else if blockS, defined := blockSchemas[attrName]; defined { - bv := jsonAttr.Value - blockDiags := b.unpackBlock(bv, blockS.Type, &jsonAttr.NameRange, blockS.LabelNames, nil, nil, &content.Blocks) - diags = append(diags, blockDiags...) - usedNames[attrName] = struct{}{} - } - - // We ignore anything that isn't defined because that's the - // PartialContent contract. The Content method will catch leftovers. - } - - // Make sure we got all the required attributes. - for _, attrS := range schema.Attributes { - if !attrS.Required { - continue - } - if _, defined := content.Attributes[attrS.Name]; !defined { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing required argument", - Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name), - Subject: b.MissingItemRange().Ptr(), - }) - } - } - - unusedBody := &body{ - val: b.val, - hiddenAttrs: usedNames, - } - - return content, unusedBody, diags -} - -// JustAttributes for JSON bodies interprets all properties of the wrapped -// JSON object as attributes and returns them. 
-func (b *body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
-	var diags hcl.Diagnostics
-	attrs := make(map[string]*hcl.Attribute)
-
-	obj, ok := b.val.(*objectVal)
-	if !ok {
-		diags = append(diags, &hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  "Incorrect JSON value type",
-			Detail:   "A JSON object is required here, setting the arguments for this block.",
-			Subject:  b.val.StartRange().Ptr(),
-		})
-		return attrs, diags
-	}
-
-	for _, jsonAttr := range obj.Attrs {
-		name := jsonAttr.Name
-		if name == "//" {
-			// Ignore "//" keys in objects representing bodies, to allow
-			// their use as comments.
-			continue
-		}
-
-		if _, hidden := b.hiddenAttrs[name]; hidden {
-			continue
-		}
-
-		if existing, exists := attrs[name]; exists {
-			diags = append(diags, &hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Duplicate attribute definition",
-				Detail:   fmt.Sprintf("The argument %q was already set at %s.", name, existing.Range),
-				Subject:  &jsonAttr.NameRange,
-			})
-			continue
-		}
-
-		attrs[name] = &hcl.Attribute{
-			Name:      name,
-			Expr:      &expression{src: jsonAttr.Value},
-			Range:     hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()),
-			NameRange: jsonAttr.NameRange,
-		}
-	}
-
-	// Any duplicate definitions were already reported above; every other
-	// JSON value can be a valid attribute value, so no further diagnostics
-	// are possible here.
-	return attrs, diags
-}
-
-func (b *body) MissingItemRange() hcl.Range {
-	switch tv := b.val.(type) {
-	case *objectVal:
-		return tv.CloseRange
-	case *arrayVal:
-		return tv.OpenRange
-	default:
-		// Should not happen in correct operation, but might show up if the
-		// input is invalid and we are producing partial results.
-		return tv.StartRange()
-	}
-}
-
-func (b *body) unpackBlock(v node, typeName string, typeRange *hcl.Range, labelsLeft []string, labelsUsed []string, labelRanges []hcl.Range, blocks *hcl.Blocks) (diags hcl.Diagnostics) {
-	if len(labelsLeft) > 0 {
-		labelName := labelsLeft[0]
-		jsonAttrs, attrDiags := b.collectDeepAttrs(v, &labelName)
-		diags = append(diags, attrDiags...)
-
-		if len(jsonAttrs) == 0 {
-			diags = diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Missing block label",
-				Detail:   fmt.Sprintf("At least one object property is required, whose name represents the %s block's %s.", typeName, labelName),
-				Subject:  v.StartRange().Ptr(),
-			})
-			return
-		}
-		labelsUsed := append(labelsUsed, "")
-		labelRanges := append(labelRanges, hcl.Range{})
-		for _, p := range jsonAttrs {
-			pk := p.Name
-			labelsUsed[len(labelsUsed)-1] = pk
-			labelRanges[len(labelRanges)-1] = p.NameRange
-			diags = append(diags, b.unpackBlock(p.Value, typeName, typeRange, labelsLeft[1:], labelsUsed, labelRanges, blocks)...)
-		}
-		return
-	}
-
-	// By the time we get here, we've peeled off all the labels and we're ready
-	// to deal with the block's actual content.
-
-	// need to copy the label slices because their underlying arrays will
-	// continue to be mutated after we return.
-	labels := make([]string, len(labelsUsed))
-	copy(labels, labelsUsed)
-	labelR := make([]hcl.Range, len(labelRanges))
-	copy(labelR, labelRanges)
-
-	switch tv := v.(type) {
-	case *nullVal:
-		// There is no block content, e.g. the value is null.
- return - case *objectVal: - // Single instance of the block - *blocks = append(*blocks, &hcl.Block{ - Type: typeName, - Labels: labels, - Body: &body{ - val: tv, - }, - - DefRange: tv.OpenRange, - TypeRange: *typeRange, - LabelRanges: labelR, - }) - case *arrayVal: - // Multiple instances of the block - for _, av := range tv.Values { - *blocks = append(*blocks, &hcl.Block{ - Type: typeName, - Labels: labels, - Body: &body{ - val: av, // might be mistyped; we'll find out when content is requested for this body - }, - - DefRange: tv.OpenRange, - TypeRange: *typeRange, - LabelRanges: labelR, - }) - } - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: fmt.Sprintf("Either a JSON object or a JSON array is required, representing the contents of one or more %q blocks.", typeName), - Subject: v.StartRange().Ptr(), - }) - } - return -} - -// collectDeepAttrs takes either a single object or an array of objects and -// flattens it into a list of object attributes, collecting attributes from -// all of the objects in a given array. -// -// Ordering is preserved, so a list of objects that each have one property -// will result in those properties being returned in the same order as the -// objects appeared in the array. -// -// This is appropriate for use only for objects representing bodies or labels -// within a block. -// -// The labelName argument, if non-null, is used to tailor returned error -// messages to refer to block labels rather than attributes and child blocks. -// It has no other effect. -func (b *body) collectDeepAttrs(v node, labelName *string) ([]*objectAttr, hcl.Diagnostics) { - var diags hcl.Diagnostics - var attrs []*objectAttr - - switch tv := v.(type) { - case *nullVal: - // If a value is null, then we don't return any attributes or return an error. - - case *objectVal: - attrs = append(attrs, tv.Attrs...) - - case *arrayVal: - for _, ev := range tv.Values { - switch tev := ev.(type) { - case *objectVal: - attrs = append(attrs, tev.Attrs...) - default: - if labelName != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: fmt.Sprintf("A JSON object is required here, to specify %s labels for this block.", *labelName), - Subject: ev.StartRange().Ptr(), - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: "A JSON object is required here, to define arguments and child blocks.", - Subject: ev.StartRange().Ptr(), - }) - } - } - } - - default: - if labelName != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: fmt.Sprintf("Either a JSON object or JSON array of objects is required here, to specify %s labels for this block.", *labelName), - Subject: v.StartRange().Ptr(), - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: "Either a JSON object or JSON array of objects is required here, to define arguments and child blocks.", - Subject: v.StartRange().Ptr(), - }) - } - } - - return attrs, diags -} - -func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - switch v := e.src.(type) { - case *stringVal: - if ctx != nil { - // Parse string contents as a HCL native language expression. 
- // We only do this if we have a context, so passing a nil context - // is how the caller specifies that interpolations are not allowed - // and that the string should just be returned verbatim. - templateSrc := v.Value - expr, diags := hclsyntax.ParseTemplate( - []byte(templateSrc), - v.SrcRange.Filename, - - // This won't produce _exactly_ the right result, since - // the hclsyntax parser can't "see" any escapes we removed - // while parsing JSON, but it's better than nothing. - hcl.Pos{ - Line: v.SrcRange.Start.Line, - - // skip over the opening quote mark - Byte: v.SrcRange.Start.Byte + 1, - Column: v.SrcRange.Start.Column + 1, - }, - ) - if diags.HasErrors() { - return cty.DynamicVal, diags - } - val, evalDiags := expr.Value(ctx) - diags = append(diags, evalDiags...) - return val, diags - } - - return cty.StringVal(v.Value), nil - case *numberVal: - return cty.NumberVal(v.Value), nil - case *booleanVal: - return cty.BoolVal(v.Value), nil - case *arrayVal: - var diags hcl.Diagnostics - vals := []cty.Value{} - for _, jsonVal := range v.Values { - val, valDiags := (&expression{src: jsonVal}).Value(ctx) - vals = append(vals, val) - diags = append(diags, valDiags...) - } - return cty.TupleVal(vals), diags - case *objectVal: - var diags hcl.Diagnostics - attrs := map[string]cty.Value{} - attrRanges := map[string]hcl.Range{} - known := true - for _, jsonAttr := range v.Attrs { - // In this one context we allow keys to contain interpolation - // expressions too, assuming we're evaluating in interpolation - // mode. This achieves parity with the native syntax where - // object expressions can have dynamic keys, while block contents - // may not. - name, nameDiags := (&expression{src: &stringVal{ - Value: jsonAttr.Name, - SrcRange: jsonAttr.NameRange, - }}).Value(ctx) - valExpr := &expression{src: jsonAttr.Value} - val, valDiags := valExpr.Value(ctx) - diags = append(diags, nameDiags...) - diags = append(diags, valDiags...) - - var err error - name, err = convert.Convert(name, cty.String) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid object key expression", - Detail: fmt.Sprintf("Cannot use this expression as an object key: %s.", err), - Subject: &jsonAttr.NameRange, - Expression: valExpr, - EvalContext: ctx, - }) - continue - } - if name.IsNull() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid object key expression", - Detail: "Cannot use null value as an object key.", - Subject: &jsonAttr.NameRange, - Expression: valExpr, - EvalContext: ctx, - }) - continue - } - if !name.IsKnown() { - // This is a bit of a weird case, since our usual rules require - // us to tolerate unknowns and just represent the result as - // best we can but if we don't know the key then we can't - // know the type of our object at all, and thus we must turn - // the whole thing into cty.DynamicVal. This is consistent with - // how this situation is handled in the native syntax. - // We'll keep iterating so we can collect other errors in - // subsequent attributes. 
- known = false - continue - } - nameStr := name.AsString() - if _, defined := attrs[nameStr]; defined { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate object attribute", - Detail: fmt.Sprintf("An attribute named %q was already defined at %s.", nameStr, attrRanges[nameStr]), - Subject: &jsonAttr.NameRange, - Expression: e, - EvalContext: ctx, - }) - continue - } - attrs[nameStr] = val - attrRanges[nameStr] = jsonAttr.NameRange - } - if !known { - // We encountered an unknown key somewhere along the way, so - // we can't know what our type will eventually be. - return cty.DynamicVal, diags - } - return cty.ObjectVal(attrs), diags - case *nullVal: - return cty.NullVal(cty.DynamicPseudoType), nil - default: - // Default to DynamicVal so that ASTs containing invalid nodes can - // still be partially-evaluated. - return cty.DynamicVal, nil - } -} - -func (e *expression) Variables() []hcl.Traversal { - var vars []hcl.Traversal - - switch v := e.src.(type) { - case *stringVal: - templateSrc := v.Value - expr, diags := hclsyntax.ParseTemplate( - []byte(templateSrc), - v.SrcRange.Filename, - - // This won't produce _exactly_ the right result, since - // the hclsyntax parser can't "see" any escapes we removed - // while parsing JSON, but it's better than nothing. - hcl.Pos{ - Line: v.SrcRange.Start.Line, - - // skip over the opening quote mark - Byte: v.SrcRange.Start.Byte + 1, - Column: v.SrcRange.Start.Column + 1, - }, - ) - if diags.HasErrors() { - return vars - } - return expr.Variables() - - case *arrayVal: - for _, jsonVal := range v.Values { - vars = append(vars, (&expression{src: jsonVal}).Variables()...) - } - case *objectVal: - for _, jsonAttr := range v.Attrs { - keyExpr := &stringVal{ // we're going to treat key as an expression in this context - Value: jsonAttr.Name, - SrcRange: jsonAttr.NameRange, - } - vars = append(vars, (&expression{src: keyExpr}).Variables()...) - vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...) - } - } - - return vars -} - -func (e *expression) Range() hcl.Range { - return e.src.Range() -} - -func (e *expression) StartRange() hcl.Range { - return e.src.StartRange() -} - -// Implementation for hcl.AbsTraversalForExpr. -func (e *expression) AsTraversal() hcl.Traversal { - // In JSON-based syntax a traversal is given as a string containing - // traversal syntax as defined by hclsyntax.ParseTraversalAbs. - - switch v := e.src.(type) { - case *stringVal: - traversal, diags := hclsyntax.ParseTraversalAbs([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) - if diags.HasErrors() { - return nil - } - return traversal - default: - return nil - } -} - -// Implementation for hcl.ExprCall. -func (e *expression) ExprCall() *hcl.StaticCall { - // In JSON-based syntax a static call is given as a string containing - // an expression in the native syntax that also supports ExprCall. - - switch v := e.src.(type) { - case *stringVal: - expr, diags := hclsyntax.ParseExpression([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) - if diags.HasErrors() { - return nil - } - - call, diags := hcl.ExprCall(expr) - if diags.HasErrors() { - return nil - } - - return call - default: - return nil - } -} - -// Implementation for hcl.ExprList. 
-func (e *expression) ExprList() []hcl.Expression { - switch v := e.src.(type) { - case *arrayVal: - ret := make([]hcl.Expression, len(v.Values)) - for i, node := range v.Values { - ret[i] = &expression{src: node} - } - return ret - default: - return nil - } -} - -// Implementation for hcl.ExprMap. -func (e *expression) ExprMap() []hcl.KeyValuePair { - switch v := e.src.(type) { - case *objectVal: - ret := make([]hcl.KeyValuePair, len(v.Attrs)) - for i, jsonAttr := range v.Attrs { - ret[i] = hcl.KeyValuePair{ - Key: &expression{src: &stringVal{ - Value: jsonAttr.Name, - SrcRange: jsonAttr.NameRange, - }}, - Value: &expression{src: jsonAttr.Value}, - } - } - return ret - default: - return nil - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go b/vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go deleted file mode 100644 index bbcce5b3..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by "stringer -type tokenType scanner.go"; DO NOT EDIT. - -package json - -import "strconv" - -const _tokenType_name = "tokenInvalidtokenCommatokenColontokenEqualstokenKeywordtokenNumbertokenStringtokenBrackOtokenBrackCtokenBraceOtokenBraceCtokenEOF" - -var _tokenType_map = map[tokenType]string{ - 0: _tokenType_name[0:12], - 44: _tokenType_name[12:22], - 58: _tokenType_name[22:32], - 61: _tokenType_name[32:43], - 75: _tokenType_name[43:55], - 78: _tokenType_name[55:66], - 83: _tokenType_name[66:77], - 91: _tokenType_name[77:88], - 93: _tokenType_name[88:99], - 123: _tokenType_name[99:110], - 125: _tokenType_name[110:121], - 9220: _tokenType_name[121:129], -} - -func (i tokenType) String() string { - if str, ok := _tokenType_map[i]; ok { - return str - } - return "tokenType(" + strconv.FormatInt(int64(i), 10) + ")" -} diff --git a/vendor/github.com/hashicorp/hil/.gitignore b/vendor/github.com/hashicorp/hil/.gitignore deleted file mode 100644 index 9d6e5df3..00000000 --- a/vendor/github.com/hashicorp/hil/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -.DS_Store -.idea -*.iml diff --git a/vendor/github.com/hashicorp/hil/.travis.yml b/vendor/github.com/hashicorp/hil/.travis.yml deleted file mode 100644 index a7854442..00000000 --- a/vendor/github.com/hashicorp/hil/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -sudo: false -language: go -go: 1.7 diff --git a/vendor/github.com/hashicorp/hil/LICENSE b/vendor/github.com/hashicorp/hil/LICENSE deleted file mode 100644 index 82b4de97..00000000 --- a/vendor/github.com/hashicorp/hil/LICENSE +++ /dev/null @@ -1,353 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. 
that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. 
for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. 
Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses
-
-  If You choose to distribute Source Code Form that is Incompatible With
-  Secondary Licenses under the terms of this version of the License, the
-  notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
-
-      This Source Code Form is subject to the
-      terms of the Mozilla Public License, v.
-      2.0. If a copy of the MPL was not
-      distributed with this file, You can
-      obtain one at
-      http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file, then
-You may include the notice in a location (such as a LICENSE file in a relevant
-directory) where a recipient would be likely to look for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - “Incompatible With Secondary Licenses” Notice
-
-      This Source Code Form is “Incompatible
-      With Secondary Licenses”, as defined by
-      the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/hil/README.md b/vendor/github.com/hashicorp/hil/README.md
deleted file mode 100644
index 186ed251..00000000
--- a/vendor/github.com/hashicorp/hil/README.md
+++ /dev/null
@@ -1,102 +0,0 @@
-# HIL
-
-[![GoDoc](https://godoc.org/github.com/hashicorp/hil?status.png)](https://godoc.org/github.com/hashicorp/hil) [![Build Status](https://travis-ci.org/hashicorp/hil.svg?branch=master)](https://travis-ci.org/hashicorp/hil)
-
-HIL (HashiCorp Interpolation Language) is a lightweight embedded language used
-primarily for configuration interpolation. The goal of HIL is to make a simple
-language for interpolations in the various configurations of HashiCorp tools.
-
-HIL is built to interpolate any string, but is in use by HashiCorp primarily
-with [HCL](https://github.com/hashicorp/hcl). HCL is _not required_ in any
-way for use with HIL.
-
-HIL isn't meant to be a general purpose language. It was built for basic
-configuration interpolations. Therefore, you can't currently write functions,
-have conditionals, set intermediary variables, etc. within HIL itself. It is
-possible some of these may be added later but the right use case must exist.
-
-## Why?
-
-Many of our tools have support for something similar to templates, but
-within the configuration itself. The most prominent requirement was in
-[Terraform](https://github.com/hashicorp/terraform) where we wanted the
-configuration to be able to reference values from elsewhere in the
-configuration. Example:
-
-    foo = "hi ${var.world}"
-
-We originally used a full templating language for this, but found it
-was too heavyweight. Additionally, many full languages required bindings
-to C (and thus the usage of cgo) which we try to avoid to make cross-compilation
-easier. We then moved to very basic regular expression based
-string replacement, but found the need for basic arithmetic and function
-calls, resulting in overly complex regular expressions.
-
-Ultimately, we wrote our own mini-language within Terraform itself. As
-we built other projects such as [Nomad](https://nomadproject.io) and
-[Otto](https://ottoproject.io), the need for basic interpolations arose
-again.
-
-Thus HIL was born. It is extracted from Terraform, cleaned up, and
-better tested for general purpose use.
-
-## Syntax
-
-For a complete grammar, please see the parser itself. A high-level overview
-of the syntax and grammar is given here.
-
-Code begins within `${` and `}`. Outside of this, text is treated
-literally.
For example, `foo` is a valid HIL program that is just the
-string "foo", but `foo ${bar}` is an HIL program that is the string "foo "
-concatenated with the value of `bar`. For the remainder of the syntax
-docs, we'll assume you're within `${}`.
-
-  * Identifiers are any text in the format of `[a-zA-Z0-9-.]`. Example
-    identifiers: `foo`, `var.foo`, `foo-bar`.
-
-  * Strings are double quoted and can contain any UTF-8 characters.
-    Example: `"Hello, World"`
-
-  * Numbers are assumed to be base 10. If you prefix a number with 0x,
-    it is treated as a hexadecimal. If it is prefixed with 0, it is
-    treated as an octal. Numbers can be in scientific notation: "1e10".
-
-  * Unary `-` can be used for negative numbers. Example: `-10` or `-0.2`
-
-  * Boolean values: `true`, `false`
-
-  * The following arithmetic operations are allowed: +, -, *, /, %.
-
-  * Function calls are in the form of `name(arg1, arg2, ...)`. Example:
-    `add(1, 5)`. Arguments can be any valid HIL expression, example:
-    `add(1, var.foo)` or even nested function calls:
-    `add(1, get("some value"))`.
-
-  * Within strings, further interpolations can be opened with `${}`.
-    Example: `"Hello ${nested}"`. A full example including the
-    original `${}` (remember this list assumes we're inside of one
-    already) could be: `foo ${func("hello ${var.foo}")}`.
-
-## Language Changes
-
-We've used this mini-language in Terraform for years. For backwards compatibility
-reasons, we're unlikely to make an incompatible change to the language but
-we're not currently making that promise, either.
-
-The internal API of this project may very well change as we evolve it
-to work with more of our projects. We recommend using some sort of dependency
-management solution with this package.
-
-## Future Changes
-
-The following changes are already planned to be made at some point:
-
-  * Richer types: lists, maps, etc.
-
-  * Convert to a more standard Go parser structure similar to HCL. This
-    will improve our error messaging as well as allow us to have automatic
-    formatting.
-
-  * Allow interpolations to result in more types than just a string. While
-    within the interpolation basic types are honored, the result is always
-    a string.
diff --git a/vendor/github.com/hashicorp/hil/appveyor.yml b/vendor/github.com/hashicorp/hil/appveyor.yml
deleted file mode 100644
index feaf7a34..00000000
--- a/vendor/github.com/hashicorp/hil/appveyor.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-version: "build-{branch}-{build}"
-image: Visual Studio 2015
-clone_folder: c:\gopath\src\github.com\hashicorp\hil
-environment:
-  GOPATH: c:\gopath
-init:
-  - git config --global core.autocrlf true
-install:
-- cmd: >-
-    echo %Path%
-
-    go version
-
-    go env
-
-    go get -d -v -t ./...
-build_script:
-- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic.go b/vendor/github.com/hashicorp/hil/ast/arithmetic.go
deleted file mode 100644
index 94dc24f8..00000000
--- a/vendor/github.com/hashicorp/hil/ast/arithmetic.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package ast
-
-import (
-	"bytes"
-	"fmt"
-)
-
-// Arithmetic represents a node where the result is arithmetic of
-// two or more operands in the order given.
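-//
-// For example (an editor's sketch, not part of the original file), the
-// expression 1 + 2 would be represented roughly as
-//
-//	&Arithmetic{
-//		Op:    ArithmeticOpAdd,
-//		Exprs: []Node{one, two}, // one, two: *LiteralNode values of TypeInt
-//		Posx:  InitPos,
-//	}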
-type Arithmetic struct {
-	Op    ArithmeticOp
-	Exprs []Node
-	Posx  Pos
-}
-
-func (n *Arithmetic) Accept(v Visitor) Node {
-	for i, expr := range n.Exprs {
-		n.Exprs[i] = expr.Accept(v)
-	}
-
-	return v(n)
-}
-
-func (n *Arithmetic) Pos() Pos {
-	return n.Posx
-}
-
-func (n *Arithmetic) GoString() string {
-	return fmt.Sprintf("*%#v", *n)
-}
-
-func (n *Arithmetic) String() string {
-	var b bytes.Buffer
-	for _, expr := range n.Exprs {
-		b.WriteString(fmt.Sprintf("%s", expr))
-	}
-
-	return b.String()
-}
-
-func (n *Arithmetic) Type(Scope) (Type, error) {
-	return TypeInt, nil
-}
diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
deleted file mode 100644
index 18880c60..00000000
--- a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package ast
-
-// ArithmeticOp is the operation to use for the math.
-type ArithmeticOp int
-
-const (
-	ArithmeticOpInvalid ArithmeticOp = 0
-
-	ArithmeticOpAdd ArithmeticOp = iota
-	ArithmeticOpSub
-	ArithmeticOpMul
-	ArithmeticOpDiv
-	ArithmeticOpMod
-
-	ArithmeticOpLogicalAnd
-	ArithmeticOpLogicalOr
-
-	ArithmeticOpEqual
-	ArithmeticOpNotEqual
-	ArithmeticOpLessThan
-	ArithmeticOpLessThanOrEqual
-	ArithmeticOpGreaterThan
-	ArithmeticOpGreaterThanOrEqual
-)
diff --git a/vendor/github.com/hashicorp/hil/ast/ast.go b/vendor/github.com/hashicorp/hil/ast/ast.go
deleted file mode 100644
index c6350f8b..00000000
--- a/vendor/github.com/hashicorp/hil/ast/ast.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package ast
-
-import (
-	"fmt"
-)
-
-// Node is the interface that all AST nodes must implement.
-type Node interface {
-	// Accept is called to dispatch to the visitors. It must return the
-	// resulting Node (which might be different in an AST transform).
-	Accept(Visitor) Node
-
-	// Pos returns the position of this node in some source.
-	Pos() Pos
-
-	// Type returns the type of this node for the given context.
-	Type(Scope) (Type, error)
-}
-
-// Pos is the starting position of an AST node
-type Pos struct {
-	Column, Line int    // Column/Line number, starting at 1
-	Filename     string // Optional source filename, if known
-}
-
-func (p Pos) String() string {
-	if p.Filename == "" {
-		return fmt.Sprintf("%d:%d", p.Line, p.Column)
-	} else {
-		return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column)
-	}
-}
-
-// InitPos is an initial position value. This should be used as
-// the starting position (presets the column and line to 1).
-var InitPos = Pos{Column: 1, Line: 1}
-
-// Visitors are just implementations of this function.
-//
-// The function must return the Node to replace this node with. "nil" is
-// _not_ a valid return value. If there is no replacement, the original node
-// should be returned. We build this replacement directly into the visitor
-// pattern since AST transformations are a common and useful tool and
-// building it into the AST itself makes it required for future Node
-// implementations and very easy to do.
-//
-// Note that this isn't a true implementation of the visitor pattern, which
-// generally requires proper type dispatch on the function. However,
-// implementing this basic visitor pattern style is still very useful even
-// if you have to type switch.
-type Visitor func(Node) Node
-
-//go:generate stringer -type=Type
-
-// Type is the type of any value.
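-//
-// Editor's note (not in the original source): aside from TypeInvalid, the
-// values below are distinct powers of two (TypeAny = 2, TypeBool = 4,
-// TypeString = 8, TypeInt = 16, and so on), which is why the generated
-// String method in type_string.go switches on exact values rather than
-// indexing a contiguous range.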
-type Type uint32 - -const ( - TypeInvalid Type = 0 - TypeAny Type = 1 << iota - TypeBool - TypeString - TypeInt - TypeFloat - TypeList - TypeMap - - // This is a special type used by Terraform to mark "unknown" values. - // It is impossible for this type to be introduced into your HIL programs - // unless you explicitly set a variable to this value. In that case, - // any operation including the variable will return "TypeUnknown" as the - // type. - TypeUnknown -) - -func (t Type) Printable() string { - switch t { - case TypeInvalid: - return "invalid type" - case TypeAny: - return "any type" - case TypeBool: - return "type bool" - case TypeString: - return "type string" - case TypeInt: - return "type int" - case TypeFloat: - return "type float" - case TypeList: - return "type list" - case TypeMap: - return "type map" - case TypeUnknown: - return "type unknown" - default: - return "unknown type" - } -} diff --git a/vendor/github.com/hashicorp/hil/ast/call.go b/vendor/github.com/hashicorp/hil/ast/call.go deleted file mode 100644 index 05570110..00000000 --- a/vendor/github.com/hashicorp/hil/ast/call.go +++ /dev/null @@ -1,47 +0,0 @@ -package ast - -import ( - "fmt" - "strings" -) - -// Call represents a function call. -type Call struct { - Func string - Args []Node - Posx Pos -} - -func (n *Call) Accept(v Visitor) Node { - for i, a := range n.Args { - n.Args[i] = a.Accept(v) - } - - return v(n) -} - -func (n *Call) Pos() Pos { - return n.Posx -} - -func (n *Call) String() string { - args := make([]string, len(n.Args)) - for i, arg := range n.Args { - args[i] = fmt.Sprintf("%s", arg) - } - - return fmt.Sprintf("Call(%s, %s)", n.Func, strings.Join(args, ", ")) -} - -func (n *Call) Type(s Scope) (Type, error) { - f, ok := s.LookupFunc(n.Func) - if !ok { - return TypeInvalid, fmt.Errorf("unknown function: %s", n.Func) - } - - return f.ReturnType, nil -} - -func (n *Call) GoString() string { - return fmt.Sprintf("*%#v", *n) -} diff --git a/vendor/github.com/hashicorp/hil/ast/conditional.go b/vendor/github.com/hashicorp/hil/ast/conditional.go deleted file mode 100644 index be48f89d..00000000 --- a/vendor/github.com/hashicorp/hil/ast/conditional.go +++ /dev/null @@ -1,36 +0,0 @@ -package ast - -import ( - "fmt" -) - -type Conditional struct { - CondExpr Node - TrueExpr Node - FalseExpr Node - Posx Pos -} - -// Accept passes the given visitor to the child nodes in this order: -// CondExpr, TrueExpr, FalseExpr. It then finally passes itself to the visitor. -func (n *Conditional) Accept(v Visitor) Node { - n.CondExpr = n.CondExpr.Accept(v) - n.TrueExpr = n.TrueExpr.Accept(v) - n.FalseExpr = n.FalseExpr.Accept(v) - - return v(n) -} - -func (n *Conditional) Pos() Pos { - return n.Posx -} - -func (n *Conditional) Type(Scope) (Type, error) { - // This is not actually a useful value; the type checker ignores - // this function when analyzing conditionals, just as with Arithmetic. 
-	return TypeInt, nil
-}
-
-func (n *Conditional) GoString() string {
-	return fmt.Sprintf("*%#v", *n)
-}
diff --git a/vendor/github.com/hashicorp/hil/ast/index.go b/vendor/github.com/hashicorp/hil/ast/index.go
deleted file mode 100644
index 860c25fd..00000000
--- a/vendor/github.com/hashicorp/hil/ast/index.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package ast
-
-import (
-	"fmt"
-	"strings"
-)
-
-// Index represents an indexing operation into another data structure
-type Index struct {
-	Target Node
-	Key    Node
-	Posx   Pos
-}
-
-func (n *Index) Accept(v Visitor) Node {
-	n.Target = n.Target.Accept(v)
-	n.Key = n.Key.Accept(v)
-	return v(n)
-}
-
-func (n *Index) Pos() Pos {
-	return n.Posx
-}
-
-func (n *Index) String() string {
-	return fmt.Sprintf("Index(%s, %s)", n.Target, n.Key)
-}
-
-func (n *Index) Type(s Scope) (Type, error) {
-	variableAccess, ok := n.Target.(*VariableAccess)
-	if !ok {
-		return TypeInvalid, fmt.Errorf("target is not a variable")
-	}
-
-	variable, ok := s.LookupVar(variableAccess.Name)
-	if !ok {
-		return TypeInvalid, fmt.Errorf("unknown variable accessed: %s", variableAccess.Name)
-	}
-
-	switch variable.Type {
-	case TypeList:
-		return n.typeList(variable, variableAccess.Name)
-	case TypeMap:
-		return n.typeMap(variable, variableAccess.Name)
-	default:
-		return TypeInvalid, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type)
-	}
-}
-
-func (n *Index) typeList(variable Variable, variableName string) (Type, error) {
-	// We assume type checking has already determined that this is a list
-	list := variable.Value.([]Variable)
-
-	return VariableListElementTypesAreHomogenous(variableName, list)
-}
-
-func (n *Index) typeMap(variable Variable, variableName string) (Type, error) {
-	// We assume type checking has already determined that this is a map
-	vmap := variable.Value.(map[string]Variable)
-
-	return VariableMapValueTypesAreHomogenous(variableName, vmap)
-}
-
-func reportTypes(typesFound map[Type]struct{}) string {
-	stringTypes := make([]string, len(typesFound))
-	i := 0
-	for k := range typesFound {
-		stringTypes[i] = k.String() // record each type name at its own index
-		i++
-	}
-	return strings.Join(stringTypes, ", ")
-}
-
-func (n *Index) GoString() string {
-	return fmt.Sprintf("*%#v", *n)
-}
diff --git a/vendor/github.com/hashicorp/hil/ast/literal.go b/vendor/github.com/hashicorp/hil/ast/literal.go
deleted file mode 100644
index da6014fe..00000000
--- a/vendor/github.com/hashicorp/hil/ast/literal.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package ast
-
-import (
-	"fmt"
-	"reflect"
-)
-
-// LiteralNode represents a single literal value, such as "foo" or
-// 42 or 3.14159. Based on the Type, the Value can be safely cast.
-type LiteralNode struct {
-	Value interface{}
-	Typex Type
-	Posx  Pos
-}
-
-// NewLiteralNode returns a new literal node representing the given
-// literal Go value, which must correspond to one of the primitive types
-// supported by HIL. Lists and maps cannot currently be constructed via
-// this function.
-//
-// If an inappropriately-typed value is provided, this function will
-// return an error. The main intended use of this function is to produce
-// "synthetic" literals from constants in code, where the value type is
-// well known at compile time. To easily store these in global variables,
-// see also MustNewLiteralNode.
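-//
-// A minimal usage sketch (editor's addition, not in the original source):
-//
-//	n, err := NewLiteralNode(42, InitPos)
-//	// on success, n.Typex == TypeInt and n.Value.(int) == 42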
-func NewLiteralNode(value interface{}, pos Pos) (*LiteralNode, error) {
-	goType := reflect.TypeOf(value)
-	var hilType Type
-
-	switch goType.Kind() {
-	case reflect.Bool:
-		hilType = TypeBool
-	case reflect.Int:
-		hilType = TypeInt
-	case reflect.Float64:
-		hilType = TypeFloat
-	case reflect.String:
-		hilType = TypeString
-	default:
-		return nil, fmt.Errorf("unsupported literal node type: %T", value)
-	}
-
-	return &LiteralNode{
-		Value: value,
-		Typex: hilType,
-		Posx:  pos,
-	}, nil
-}
-
-// MustNewLiteralNode wraps NewLiteralNode and panics if an error is
-// returned, thus allowing valid literal nodes to be easily assigned to
-// global variables.
-func MustNewLiteralNode(value interface{}, pos Pos) *LiteralNode {
-	node, err := NewLiteralNode(value, pos)
-	if err != nil {
-		panic(err)
-	}
-	return node
-}
-
-func (n *LiteralNode) Accept(v Visitor) Node {
-	return v(n)
-}
-
-func (n *LiteralNode) Pos() Pos {
-	return n.Posx
-}
-
-func (n *LiteralNode) GoString() string {
-	return fmt.Sprintf("*%#v", *n)
-}
-
-func (n *LiteralNode) String() string {
-	return fmt.Sprintf("Literal(%s, %v)", n.Typex, n.Value)
-}
-
-func (n *LiteralNode) Type(Scope) (Type, error) {
-	return n.Typex, nil
-}
-
-// IsUnknown returns true either if the node's value is itself unknown
-// or if it is a collection containing any unknown elements, deeply.
-func (n *LiteralNode) IsUnknown() bool {
-	return IsUnknown(Variable{
-		Type:  n.Typex,
-		Value: n.Value,
-	})
-}
diff --git a/vendor/github.com/hashicorp/hil/ast/output.go b/vendor/github.com/hashicorp/hil/ast/output.go
deleted file mode 100644
index 1e27f970..00000000
--- a/vendor/github.com/hashicorp/hil/ast/output.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package ast
-
-import (
-	"bytes"
-	"fmt"
-)
-
-// Output represents the root node of all interpolation evaluations. If the
-// output only has one expression which is either a TypeList or TypeMap, the
-// Output can be type-asserted to []interface{} or map[string]interface{}
-// respectively. Otherwise the Output evaluates as a string, and concatenates
-// the evaluation of each expression.
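-//
-// For instance (an editor's sketch of the typical shape, not part of the
-// original source), the template "foo ${bar}" yields an Output whose Exprs
-// hold a *LiteralNode for "foo " followed by the node for bar, and it
-// evaluates to their string concatenation.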
-type Output struct {
-	Exprs []Node
-	Posx  Pos
-}
-
-func (n *Output) Accept(v Visitor) Node {
-	for i, expr := range n.Exprs {
-		n.Exprs[i] = expr.Accept(v)
-	}
-
-	return v(n)
-}
-
-func (n *Output) Pos() Pos {
-	return n.Posx
-}
-
-func (n *Output) GoString() string {
-	return fmt.Sprintf("*%#v", *n)
-}
-
-func (n *Output) String() string {
-	var b bytes.Buffer
-	for _, expr := range n.Exprs {
-		b.WriteString(fmt.Sprintf("%s", expr))
-	}
-
-	return b.String()
-}
-
-func (n *Output) Type(s Scope) (Type, error) {
-	// Special case no expressions for backward compatibility
-	if len(n.Exprs) == 0 {
-		return TypeString, nil
-	}
-
-	// Special case a single expression of types list or map
-	if len(n.Exprs) == 1 {
-		exprType, err := n.Exprs[0].Type(s)
-		if err != nil {
-			return TypeInvalid, err
-		}
-		switch exprType {
-		case TypeList:
-			return TypeList, nil
-		case TypeMap:
-			return TypeMap, nil
-		}
-	}
-
-	// Otherwise ensure all our expressions are strings
-	for index, expr := range n.Exprs {
-		exprType, err := expr.Type(s)
-		if err != nil {
-			return TypeInvalid, err
-		}
-		// We only look for things we know we can't coerce with an implicit conversion func
-		if exprType == TypeList || exprType == TypeMap {
-			return TypeInvalid, fmt.Errorf(
-				"multi-expression HIL outputs may only have string inputs: %d is type %s",
-				index, exprType)
-		}
-	}
-
-	return TypeString, nil
-}
diff --git a/vendor/github.com/hashicorp/hil/ast/scope.go b/vendor/github.com/hashicorp/hil/ast/scope.go
deleted file mode 100644
index 7a975d99..00000000
--- a/vendor/github.com/hashicorp/hil/ast/scope.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package ast
-
-import (
-	"fmt"
-	"reflect"
-)
-
-// Scope is the interface used to look up variables and functions while
-// evaluating. How these functions/variables are defined is up to the caller.
-type Scope interface {
-	LookupFunc(string) (Function, bool)
-	LookupVar(string) (Variable, bool)
-}
-
-// Variable is a variable value for execution given as input to the engine.
-// It records the value of a variable along with its type.
-type Variable struct {
-	Value interface{}
-	Type  Type
-}
-
-// NewVariable creates a new Variable for the given value. This will
-// attempt to infer the correct type. If it can't, an error will be returned.
-func NewVariable(v interface{}) (result Variable, err error) {
-	switch v := reflect.ValueOf(v); v.Kind() {
-	case reflect.String:
-		result.Type = TypeString
-	default:
-		err = fmt.Errorf("Unknown type: %s", v.Kind())
-	}
-
-	result.Value = v
-	return
-}
-
-// String implements Stringer on Variable, displaying the type and value
-// of the Variable.
-func (v Variable) String() string {
-	return fmt.Sprintf("{Variable (%s): %+v}", v.Type, v.Value)
-}
-
-// Function defines a function that can be executed by the engine.
-// The type checker will validate that arguments of the proper types
-// are passed to the callback.
-type Function struct {
-	// ArgTypes is the list of types in argument order. These are the
-	// required arguments.
-	//
-	// ReturnType is the type of the returned value. The Callback MUST
-	// return this type.
-	ArgTypes   []Type
-	ReturnType Type
-
-	// Variadic, if true, says that this function is variadic, meaning
-	// it takes a variable number of arguments. In this case, the
-	// VariadicType must be set.
-	Variadic     bool
-	VariadicType Type
-
-	// Callback is the function called for a function. The argument
-	// types are guaranteed to match the spec above by the type checker.
-	// The length of the args is strictly == len(ArgTypes) unless Variadic
-	// is true, in which case it's >= len(ArgTypes).
-	Callback func([]interface{}) (interface{}, error)
-}
-
-// BasicScope is a simple scope that looks up variables and functions
-// using a map.
-type BasicScope struct {
-	FuncMap map[string]Function
-	VarMap  map[string]Variable
-}
-
-func (s *BasicScope) LookupFunc(n string) (Function, bool) {
-	if s == nil {
-		return Function{}, false
-	}
-
-	v, ok := s.FuncMap[n]
-	return v, ok
-}
-
-func (s *BasicScope) LookupVar(n string) (Variable, bool) {
-	if s == nil {
-		return Variable{}, false
-	}
-
-	v, ok := s.VarMap[n]
-	return v, ok
-}
diff --git a/vendor/github.com/hashicorp/hil/ast/stack.go b/vendor/github.com/hashicorp/hil/ast/stack.go
deleted file mode 100644
index bd2bc157..00000000
--- a/vendor/github.com/hashicorp/hil/ast/stack.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package ast
-
-// Stack is a stack of Node.
-type Stack struct {
-	stack []Node
-}
-
-func (s *Stack) Len() int {
-	return len(s.stack)
-}
-
-func (s *Stack) Push(n Node) {
-	s.stack = append(s.stack, n)
-}
-
-func (s *Stack) Pop() Node {
-	x := s.stack[len(s.stack)-1]
-	s.stack[len(s.stack)-1] = nil
-	s.stack = s.stack[:len(s.stack)-1]
-	return x
-}
-
-func (s *Stack) Reset() {
-	s.stack = nil
-}
diff --git a/vendor/github.com/hashicorp/hil/ast/type_string.go b/vendor/github.com/hashicorp/hil/ast/type_string.go
deleted file mode 100644
index 1f51a98d..00000000
--- a/vendor/github.com/hashicorp/hil/ast/type_string.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Code generated by "stringer -type=Type"; DO NOT EDIT
-
-package ast
-
-import "fmt"
-
-const (
-	_Type_name_0 = "TypeInvalid"
-	_Type_name_1 = "TypeAny"
-	_Type_name_2 = "TypeBool"
-	_Type_name_3 = "TypeString"
-	_Type_name_4 = "TypeInt"
-	_Type_name_5 = "TypeFloat"
-	_Type_name_6 = "TypeList"
-	_Type_name_7 = "TypeMap"
-	_Type_name_8 = "TypeUnknown"
-)
-
-var (
-	_Type_index_0 = [...]uint8{0, 11}
-	_Type_index_1 = [...]uint8{0, 7}
-	_Type_index_2 = [...]uint8{0, 8}
-	_Type_index_3 = [...]uint8{0, 10}
-	_Type_index_4 = [...]uint8{0, 7}
-	_Type_index_5 = [...]uint8{0, 9}
-	_Type_index_6 = [...]uint8{0, 8}
-	_Type_index_7 = [...]uint8{0, 7}
-	_Type_index_8 = [...]uint8{0, 11}
-)
-
-func (i Type) String() string {
-	switch {
-	case i == 0:
-		return _Type_name_0
-	case i == 2:
-		return _Type_name_1
-	case i == 4:
-		return _Type_name_2
-	case i == 8:
-		return _Type_name_3
-	case i == 16:
-		return _Type_name_4
-	case i == 32:
-		return _Type_name_5
-	case i == 64:
-		return _Type_name_6
-	case i == 128:
-		return _Type_name_7
-	case i == 256:
-		return _Type_name_8
-	default:
-		return fmt.Sprintf("Type(%d)", i)
-	}
-}
diff --git a/vendor/github.com/hashicorp/hil/ast/unknown.go b/vendor/github.com/hashicorp/hil/ast/unknown.go
deleted file mode 100644
index d6ddaecc..00000000
--- a/vendor/github.com/hashicorp/hil/ast/unknown.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package ast
-
-// IsUnknown reports whether a variable is unknown or contains any value
-// that is unknown. This will recurse into lists and maps and so on.
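-//
-// For example (editor's sketch, not in the original source):
-//
-//	v := Variable{Type: TypeList, Value: []Variable{
-//		{Type: TypeString, Value: "known"},
-//		{Type: TypeUnknown},
-//	}}
-//	// IsUnknown(v) == true, because one element is unknown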
-func IsUnknown(v Variable) bool {
-	// If it is unknown itself, return true
-	if v.Type == TypeUnknown {
-		return true
-	}
-
-	// If it is a container type, check the values
-	switch v.Type {
-	case TypeList:
-		for _, el := range v.Value.([]Variable) {
-			if IsUnknown(el) {
-				return true
-			}
-		}
-	case TypeMap:
-		for _, el := range v.Value.(map[string]Variable) {
-			if IsUnknown(el) {
-				return true
-			}
-		}
-	default:
-	}
-
-	// Not a container type, or it survived the above checks
-	return false
-}
diff --git a/vendor/github.com/hashicorp/hil/ast/variable_access.go b/vendor/github.com/hashicorp/hil/ast/variable_access.go
deleted file mode 100644
index 4c1362d7..00000000
--- a/vendor/github.com/hashicorp/hil/ast/variable_access.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package ast
-
-import (
-	"fmt"
-)
-
-// VariableAccess represents a variable access.
-type VariableAccess struct {
-	Name string
-	Posx Pos
-}
-
-func (n *VariableAccess) Accept(v Visitor) Node {
-	return v(n)
-}
-
-func (n *VariableAccess) Pos() Pos {
-	return n.Posx
-}
-
-func (n *VariableAccess) GoString() string {
-	return fmt.Sprintf("*%#v", *n)
-}
-
-func (n *VariableAccess) String() string {
-	return fmt.Sprintf("Variable(%s)", n.Name)
-}
-
-func (n *VariableAccess) Type(s Scope) (Type, error) {
-	v, ok := s.LookupVar(n.Name)
-	if !ok {
-		return TypeInvalid, fmt.Errorf("unknown variable: %s", n.Name)
-	}
-
-	return v.Type, nil
-}
diff --git a/vendor/github.com/hashicorp/hil/ast/variables_helper.go b/vendor/github.com/hashicorp/hil/ast/variables_helper.go
deleted file mode 100644
index 06bd18de..00000000
--- a/vendor/github.com/hashicorp/hil/ast/variables_helper.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package ast
-
-import "fmt"
-
-func VariableListElementTypesAreHomogenous(variableName string, list []Variable) (Type, error) {
-	if len(list) == 0 {
-		return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName)
-	}
-
-	elemType := TypeUnknown
-	for _, v := range list {
-		if v.Type == TypeUnknown {
-			continue
-		}
-
-		if elemType == TypeUnknown {
-			elemType = v.Type
-			continue
-		}
-
-		if v.Type != elemType {
-			return TypeInvalid, fmt.Errorf(
-				"list %q does not have homogenous types. found %s and then %s",
-				variableName,
-				elemType, v.Type,
-			)
-		}
-
-		elemType = v.Type
-	}
-
-	return elemType, nil
-}
-
-func VariableMapValueTypesAreHomogenous(variableName string, vmap map[string]Variable) (Type, error) {
-	if len(vmap) == 0 {
-		return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName)
-	}
-
-	elemType := TypeUnknown
-	for _, v := range vmap {
-		if v.Type == TypeUnknown {
-			continue
-		}
-
-		if elemType == TypeUnknown {
-			elemType = v.Type
-			continue
-		}
-
-		if v.Type != elemType {
-			return TypeInvalid, fmt.Errorf(
-				"map %q does not have homogenous types.
found %s and then %s", - variableName, - elemType, v.Type, - ) - } - - elemType = v.Type - } - - return elemType, nil -} diff --git a/vendor/github.com/hashicorp/hil/builtins.go b/vendor/github.com/hashicorp/hil/builtins.go deleted file mode 100644 index 909c788a..00000000 --- a/vendor/github.com/hashicorp/hil/builtins.go +++ /dev/null @@ -1,331 +0,0 @@ -package hil - -import ( - "errors" - "strconv" - - "github.com/hashicorp/hil/ast" -) - -// NOTE: All builtins are tested in engine_test.go - -func registerBuiltins(scope *ast.BasicScope) *ast.BasicScope { - if scope == nil { - scope = new(ast.BasicScope) - } - if scope.FuncMap == nil { - scope.FuncMap = make(map[string]ast.Function) - } - - // Implicit conversions - scope.FuncMap["__builtin_BoolToString"] = builtinBoolToString() - scope.FuncMap["__builtin_FloatToInt"] = builtinFloatToInt() - scope.FuncMap["__builtin_FloatToString"] = builtinFloatToString() - scope.FuncMap["__builtin_IntToFloat"] = builtinIntToFloat() - scope.FuncMap["__builtin_IntToString"] = builtinIntToString() - scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt() - scope.FuncMap["__builtin_StringToFloat"] = builtinStringToFloat() - scope.FuncMap["__builtin_StringToBool"] = builtinStringToBool() - - // Math operations - scope.FuncMap["__builtin_IntMath"] = builtinIntMath() - scope.FuncMap["__builtin_FloatMath"] = builtinFloatMath() - scope.FuncMap["__builtin_BoolCompare"] = builtinBoolCompare() - scope.FuncMap["__builtin_FloatCompare"] = builtinFloatCompare() - scope.FuncMap["__builtin_IntCompare"] = builtinIntCompare() - scope.FuncMap["__builtin_StringCompare"] = builtinStringCompare() - scope.FuncMap["__builtin_Logical"] = builtinLogical() - return scope -} - -func builtinFloatMath() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - Variadic: true, - VariadicType: ast.TypeFloat, - ReturnType: ast.TypeFloat, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - result := args[1].(float64) - for _, raw := range args[2:] { - arg := raw.(float64) - switch op { - case ast.ArithmeticOpAdd: - result += arg - case ast.ArithmeticOpSub: - result -= arg - case ast.ArithmeticOpMul: - result *= arg - case ast.ArithmeticOpDiv: - result /= arg - } - } - - return result, nil - }, - } -} - -func builtinIntMath() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - Variadic: true, - VariadicType: ast.TypeInt, - ReturnType: ast.TypeInt, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - result := args[1].(int) - for _, raw := range args[2:] { - arg := raw.(int) - switch op { - case ast.ArithmeticOpAdd: - result += arg - case ast.ArithmeticOpSub: - result -= arg - case ast.ArithmeticOpMul: - result *= arg - case ast.ArithmeticOpDiv: - if arg == 0 { - return nil, errors.New("divide by zero") - } - - result /= arg - case ast.ArithmeticOpMod: - if arg == 0 { - return nil, errors.New("divide by zero") - } - - result = result % arg - } - } - - return result, nil - }, - } -} - -func builtinBoolCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeBool, ast.TypeBool}, - Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - lhs := args[1].(bool) - rhs := args[2].(bool) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - default: - return nil, 
errors.New("invalid comparison operation") - } - }, - } -} - -func builtinFloatCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeFloat, ast.TypeFloat}, - Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - lhs := args[1].(float64) - rhs := args[2].(float64) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - case ast.ArithmeticOpLessThan: - return lhs < rhs, nil - case ast.ArithmeticOpLessThanOrEqual: - return lhs <= rhs, nil - case ast.ArithmeticOpGreaterThan: - return lhs > rhs, nil - case ast.ArithmeticOpGreaterThanOrEqual: - return lhs >= rhs, nil - default: - return nil, errors.New("invalid comparison operation") - } - }, - } -} - -func builtinIntCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeInt, ast.TypeInt}, - Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - lhs := args[1].(int) - rhs := args[2].(int) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - case ast.ArithmeticOpLessThan: - return lhs < rhs, nil - case ast.ArithmeticOpLessThanOrEqual: - return lhs <= rhs, nil - case ast.ArithmeticOpGreaterThan: - return lhs > rhs, nil - case ast.ArithmeticOpGreaterThanOrEqual: - return lhs >= rhs, nil - default: - return nil, errors.New("invalid comparison operation") - } - }, - } -} - -func builtinStringCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeString, ast.TypeString}, - Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - lhs := args[1].(string) - rhs := args[2].(string) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - default: - return nil, errors.New("invalid comparison operation") - } - }, - } -} - -func builtinLogical() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - Variadic: true, - VariadicType: ast.TypeBool, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - result := args[1].(bool) - for _, raw := range args[2:] { - arg := raw.(bool) - switch op { - case ast.ArithmeticOpLogicalOr: - result = result || arg - case ast.ArithmeticOpLogicalAnd: - result = result && arg - default: - return nil, errors.New("invalid logical operator") - } - } - - return result, nil - }, - } -} - -func builtinFloatToInt() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeFloat}, - ReturnType: ast.TypeInt, - Callback: func(args []interface{}) (interface{}, error) { - return int(args[0].(float64)), nil - }, - } -} - -func builtinFloatToString() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeFloat}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - return strconv.FormatFloat( - args[0].(float64), 'g', -1, 64), nil - }, - } -} - -func builtinIntToFloat() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - ReturnType: ast.TypeFloat, - Callback: func(args []interface{}) (interface{}, error) { - return float64(args[0].(int)), nil - }, - } -} - -func 
builtinIntToString() ast.Function {
-	return ast.Function{
-		ArgTypes:   []ast.Type{ast.TypeInt},
-		ReturnType: ast.TypeString,
-		Callback: func(args []interface{}) (interface{}, error) {
-			return strconv.FormatInt(int64(args[0].(int)), 10), nil
-		},
-	}
-}
-
-func builtinStringToInt() ast.Function {
-	return ast.Function{
-		// Takes a string and returns the int it parses to, matching the
-		// callback below and the other string conversion builtins.
-		ArgTypes:   []ast.Type{ast.TypeString},
-		ReturnType: ast.TypeInt,
-		Callback: func(args []interface{}) (interface{}, error) {
-			v, err := strconv.ParseInt(args[0].(string), 0, 0)
-			if err != nil {
-				return nil, err
-			}
-
-			return int(v), nil
-		},
-	}
-}
-
-func builtinStringToFloat() ast.Function {
-	return ast.Function{
-		ArgTypes:   []ast.Type{ast.TypeString},
-		ReturnType: ast.TypeFloat,
-		Callback: func(args []interface{}) (interface{}, error) {
-			v, err := strconv.ParseFloat(args[0].(string), 64)
-			if err != nil {
-				return nil, err
-			}
-
-			return v, nil
-		},
-	}
-}
-
-func builtinBoolToString() ast.Function {
-	return ast.Function{
-		ArgTypes:   []ast.Type{ast.TypeBool},
-		ReturnType: ast.TypeString,
-		Callback: func(args []interface{}) (interface{}, error) {
-			return strconv.FormatBool(args[0].(bool)), nil
-		},
-	}
-}
-
-func builtinStringToBool() ast.Function {
-	return ast.Function{
-		ArgTypes:   []ast.Type{ast.TypeString},
-		ReturnType: ast.TypeBool,
-		Callback: func(args []interface{}) (interface{}, error) {
-			v, err := strconv.ParseBool(args[0].(string))
-			if err != nil {
-				return nil, err
-			}
-
-			return v, nil
-		},
-	}
-}
diff --git a/vendor/github.com/hashicorp/hil/check_identifier.go b/vendor/github.com/hashicorp/hil/check_identifier.go
deleted file mode 100644
index 474f5058..00000000
--- a/vendor/github.com/hashicorp/hil/check_identifier.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package hil
-
-import (
-	"fmt"
-	"sync"
-
-	"github.com/hashicorp/hil/ast"
-)
-
-// IdentifierCheck is a SemanticCheck that checks that all identifiers
-// resolve properly and that the right number of arguments are passed
-// to functions.
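-//
-// A hypothetical usage sketch (editor's addition; scope and root are
-// assumed to exist and are not from the original file):
-//
-//	ic := &IdentifierCheck{Scope: scope} // scope implements ast.Scope
-//	if err := ic.Visit(root); err != nil {
-//		// some identifier or function call in root did not resolve
-//	}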
-type IdentifierCheck struct { - Scope ast.Scope - - err error - lock sync.Mutex -} - -func (c *IdentifierCheck) Visit(root ast.Node) error { - c.lock.Lock() - defer c.lock.Unlock() - defer c.reset() - root.Accept(c.visit) - return c.err -} - -func (c *IdentifierCheck) visit(raw ast.Node) ast.Node { - if c.err != nil { - return raw - } - - switch n := raw.(type) { - case *ast.Call: - c.visitCall(n) - case *ast.VariableAccess: - c.visitVariableAccess(n) - case *ast.Output: - // Ignore - case *ast.LiteralNode: - // Ignore - default: - // Ignore - } - - // We never do replacement with this visitor - return raw -} - -func (c *IdentifierCheck) visitCall(n *ast.Call) { - // Look up the function in the map - function, ok := c.Scope.LookupFunc(n.Func) - if !ok { - c.createErr(n, fmt.Sprintf("unknown function called: %s", n.Func)) - return - } - - // Break up the args into what is variadic and what is required - args := n.Args - if function.Variadic && len(args) > len(function.ArgTypes) { - args = n.Args[:len(function.ArgTypes)] - } - - // Verify the number of arguments - if len(args) != len(function.ArgTypes) { - c.createErr(n, fmt.Sprintf( - "%s: expected %d arguments, got %d", - n.Func, len(function.ArgTypes), len(n.Args))) - return - } -} - -func (c *IdentifierCheck) visitVariableAccess(n *ast.VariableAccess) { - // Look up the variable in the map - if _, ok := c.Scope.LookupVar(n.Name); !ok { - c.createErr(n, fmt.Sprintf( - "unknown variable accessed: %s", n.Name)) - return - } -} - -func (c *IdentifierCheck) createErr(n ast.Node, str string) { - c.err = fmt.Errorf("%s: %s", n.Pos(), str) -} - -func (c *IdentifierCheck) reset() { - c.err = nil -} diff --git a/vendor/github.com/hashicorp/hil/check_types.go b/vendor/github.com/hashicorp/hil/check_types.go deleted file mode 100644 index f16da391..00000000 --- a/vendor/github.com/hashicorp/hil/check_types.go +++ /dev/null @@ -1,668 +0,0 @@ -package hil - -import ( - "fmt" - "sync" - - "github.com/hashicorp/hil/ast" -) - -// TypeCheck implements ast.Visitor for type checking an AST tree. -// It requires some configuration to look up the type of nodes. -// -// It also optionally will not type error and will insert an implicit -// type conversions for specific types if specified by the Implicit -// field. Note that this is kind of organizationally weird to put into -// this structure but we'd rather do that than duplicate the type checking -// logic multiple times. -type TypeCheck struct { - Scope ast.Scope - - // Implicit is a map of implicit type conversions that we can do, - // and that shouldn't error. The key of the first map is the from type, - // the key of the second map is the to type, and the final string - // value is the function to call (which must be registered in the Scope). - Implicit map[ast.Type]map[ast.Type]string - - // Stack of types. This shouldn't be used directly except by implementations - // of TypeCheckNode. - Stack []ast.Type - - err error - lock sync.Mutex -} - -// TypeCheckNode is the interface that must be implemented by any -// ast.Node that wants to support type-checking. If the type checker -// encounters a node that doesn't implement this, it will error. -type TypeCheckNode interface { - TypeCheck(*TypeCheck) (ast.Node, error) -} - -func (v *TypeCheck) Visit(root ast.Node) error { - v.lock.Lock() - defer v.lock.Unlock() - defer v.reset() - root.Accept(v.visit) - - // If the resulting type is unknown, then just let the whole thing go. 
- if v.err == errExitUnknown { - v.err = nil - } - - return v.err -} - -func (v *TypeCheck) visit(raw ast.Node) ast.Node { - if v.err != nil { - return raw - } - - var result ast.Node - var err error - switch n := raw.(type) { - case *ast.Arithmetic: - tc := &typeCheckArithmetic{n} - result, err = tc.TypeCheck(v) - case *ast.Call: - tc := &typeCheckCall{n} - result, err = tc.TypeCheck(v) - case *ast.Conditional: - tc := &typeCheckConditional{n} - result, err = tc.TypeCheck(v) - case *ast.Index: - tc := &typeCheckIndex{n} - result, err = tc.TypeCheck(v) - case *ast.Output: - tc := &typeCheckOutput{n} - result, err = tc.TypeCheck(v) - case *ast.LiteralNode: - tc := &typeCheckLiteral{n} - result, err = tc.TypeCheck(v) - case *ast.VariableAccess: - tc := &typeCheckVariableAccess{n} - result, err = tc.TypeCheck(v) - default: - tc, ok := raw.(TypeCheckNode) - if !ok { - err = fmt.Errorf("unknown node for type check: %#v", raw) - break - } - - result, err = tc.TypeCheck(v) - } - - if err != nil { - pos := raw.Pos() - v.err = fmt.Errorf("At column %d, line %d: %s", - pos.Column, pos.Line, err) - } - - return result -} - -type typeCheckArithmetic struct { - n *ast.Arithmetic -} - -func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) { - // The arguments are on the stack in reverse order, so pop them off. - exprs := make([]ast.Type, len(tc.n.Exprs)) - for i, _ := range tc.n.Exprs { - exprs[len(tc.n.Exprs)-1-i] = v.StackPop() - } - - // If any operand is unknown then our result is automatically unknown - for _, ty := range exprs { - if ty == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - } - - switch tc.n.Op { - case ast.ArithmeticOpLogicalAnd, ast.ArithmeticOpLogicalOr: - return tc.checkLogical(v, exprs) - case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual, - ast.ArithmeticOpLessThan, ast.ArithmeticOpGreaterThan, - ast.ArithmeticOpGreaterThanOrEqual, ast.ArithmeticOpLessThanOrEqual: - return tc.checkComparison(v, exprs) - default: - return tc.checkNumeric(v, exprs) - } - -} - -func (tc *typeCheckArithmetic) checkNumeric(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { - // Determine the resulting type we want. We do this by going over - // every expression until we find one with a type we recognize. - // We do this because the first expr might be a string ("var.foo") - // and we need to know what to implicit to. - mathFunc := "__builtin_IntMath" - mathType := ast.TypeInt - for _, v := range exprs { - // We assume int math but if we find ANY float, the entire - // expression turns into floating point math. - if v == ast.TypeFloat { - mathFunc = "__builtin_FloatMath" - mathType = v - break - } - } - - // Verify the args - for i, arg := range exprs { - if arg != mathType { - cn := v.ImplicitConversion(exprs[i], mathType, tc.n.Exprs[i]) - if cn != nil { - tc.n.Exprs[i] = cn - continue - } - - return nil, fmt.Errorf( - "operand %d should be %s, got %s", - i+1, mathType, arg) - } - } - - // Modulo doesn't work for floats - if mathType == ast.TypeFloat && tc.n.Op == ast.ArithmeticOpMod { - return nil, fmt.Errorf("modulo cannot be used with floats") - } - - // Return type - v.StackPush(mathType) - - // Replace our node with a call to the proper function. This isn't - // type checked but we already verified types. 
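-	// (Editor's sketch, not in the original source: e.g. 1 + 2 is rewritten
-	// into a call such as __builtin_IntMath(op, 1, 2), where op is the
-	// operator carried as a leading TypeInt literal argument.)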
- args := make([]ast.Node, len(tc.n.Exprs)+1) - args[0] = &ast.LiteralNode{ - Value: tc.n.Op, - Typex: ast.TypeInt, - Posx: tc.n.Pos(), - } - copy(args[1:], tc.n.Exprs) - return &ast.Call{ - Func: mathFunc, - Args: args, - Posx: tc.n.Pos(), - }, nil -} - -func (tc *typeCheckArithmetic) checkComparison(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { - if len(exprs) != 2 { - // This should never happen, because the parser never produces - // nodes that violate this. - return nil, fmt.Errorf( - "comparison operators must have exactly two operands", - ) - } - - // The first operand always dictates the type for a comparison. - compareFunc := "" - compareType := exprs[0] - switch compareType { - case ast.TypeBool: - compareFunc = "__builtin_BoolCompare" - case ast.TypeFloat: - compareFunc = "__builtin_FloatCompare" - case ast.TypeInt: - compareFunc = "__builtin_IntCompare" - case ast.TypeString: - compareFunc = "__builtin_StringCompare" - default: - return nil, fmt.Errorf( - "comparison operators apply only to bool, float, int, and string", - ) - } - - // For non-equality comparisons, we will do implicit conversions to - // integer types if possible. In this case, we need to go through and - // determine the type of comparison we're doing to enable the implicit - // conversion. - if tc.n.Op != ast.ArithmeticOpEqual && tc.n.Op != ast.ArithmeticOpNotEqual { - compareFunc = "__builtin_IntCompare" - compareType = ast.TypeInt - for _, expr := range exprs { - if expr == ast.TypeFloat { - compareFunc = "__builtin_FloatCompare" - compareType = ast.TypeFloat - break - } - } - } - - // Verify (and possibly, convert) the args - for i, arg := range exprs { - if arg != compareType { - cn := v.ImplicitConversion(exprs[i], compareType, tc.n.Exprs[i]) - if cn != nil { - tc.n.Exprs[i] = cn - continue - } - - return nil, fmt.Errorf( - "operand %d should be %s, got %s", - i+1, compareType, arg, - ) - } - } - - // Only ints and floats can have the <, >, <= and >= operators applied - switch tc.n.Op { - case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual: - // anything goes - default: - switch compareType { - case ast.TypeFloat, ast.TypeInt: - // fine - default: - return nil, fmt.Errorf( - "<, >, <= and >= may apply only to int and float values", - ) - } - } - - // Comparison operators always return bool - v.StackPush(ast.TypeBool) - - // Replace our node with a call to the proper function. This isn't - // type checked but we already verified types. 
- args := make([]ast.Node, len(tc.n.Exprs)+1) - args[0] = &ast.LiteralNode{ - Value: tc.n.Op, - Typex: ast.TypeInt, - Posx: tc.n.Pos(), - } - copy(args[1:], tc.n.Exprs) - return &ast.Call{ - Func: compareFunc, - Args: args, - Posx: tc.n.Pos(), - }, nil -} - -func (tc *typeCheckArithmetic) checkLogical(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { - for i, t := range exprs { - if t != ast.TypeBool { - cn := v.ImplicitConversion(t, ast.TypeBool, tc.n.Exprs[i]) - if cn == nil { - return nil, fmt.Errorf( - "logical operators require boolean operands, not %s", - t, - ) - } - tc.n.Exprs[i] = cn - } - } - - // Return type is always boolean - v.StackPush(ast.TypeBool) - - // Arithmetic nodes are replaced with a call to a built-in function - args := make([]ast.Node, len(tc.n.Exprs)+1) - args[0] = &ast.LiteralNode{ - Value: tc.n.Op, - Typex: ast.TypeInt, - Posx: tc.n.Pos(), - } - copy(args[1:], tc.n.Exprs) - return &ast.Call{ - Func: "__builtin_Logical", - Args: args, - Posx: tc.n.Pos(), - }, nil -} - -type typeCheckCall struct { - n *ast.Call -} - -func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) { - // Look up the function in the map - function, ok := v.Scope.LookupFunc(tc.n.Func) - if !ok { - return nil, fmt.Errorf("unknown function called: %s", tc.n.Func) - } - - // The arguments are on the stack in reverse order, so pop them off. - args := make([]ast.Type, len(tc.n.Args)) - for i, _ := range tc.n.Args { - args[len(tc.n.Args)-1-i] = v.StackPop() - } - - // Verify the args - for i, expected := range function.ArgTypes { - if expected == ast.TypeAny { - continue - } - - if args[i] == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - if args[i] != expected { - cn := v.ImplicitConversion(args[i], expected, tc.n.Args[i]) - if cn != nil { - tc.n.Args[i] = cn - continue - } - - return nil, fmt.Errorf( - "%s: argument %d should be %s, got %s", - tc.n.Func, i+1, expected.Printable(), args[i].Printable()) - } - } - - // If we're variadic, then verify the types there - if function.Variadic && function.VariadicType != ast.TypeAny { - args = args[len(function.ArgTypes):] - for i, t := range args { - if t == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - if t != function.VariadicType { - realI := i + len(function.ArgTypes) - cn := v.ImplicitConversion( - t, function.VariadicType, tc.n.Args[realI]) - if cn != nil { - tc.n.Args[realI] = cn - continue - } - - return nil, fmt.Errorf( - "%s: argument %d should be %s, got %s", - tc.n.Func, realI, - function.VariadicType.Printable(), t.Printable()) - } - } - } - - // Return type - v.StackPush(function.ReturnType) - - return tc.n, nil -} - -type typeCheckConditional struct { - n *ast.Conditional -} - -func (tc *typeCheckConditional) TypeCheck(v *TypeCheck) (ast.Node, error) { - // On the stack we have the types of the condition, true and false - // expressions, but they are in reverse order. 
- falseType := v.StackPop() - trueType := v.StackPop() - condType := v.StackPop() - - if condType == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - if condType != ast.TypeBool { - cn := v.ImplicitConversion(condType, ast.TypeBool, tc.n.CondExpr) - if cn == nil { - return nil, fmt.Errorf( - "condition must be type bool, not %s", condType.Printable(), - ) - } - tc.n.CondExpr = cn - } - - // The types of the true and false expression must match - if trueType != falseType && trueType != ast.TypeUnknown && falseType != ast.TypeUnknown { - - // Since passing around stringified versions of other types is - // common, we pragmatically allow the false expression to dictate - // the result type when the true expression is a string. - if trueType == ast.TypeString { - cn := v.ImplicitConversion(trueType, falseType, tc.n.TrueExpr) - if cn == nil { - return nil, fmt.Errorf( - "true and false expression types must match; have %s and %s", - trueType.Printable(), falseType.Printable(), - ) - } - tc.n.TrueExpr = cn - trueType = falseType - } else { - cn := v.ImplicitConversion(falseType, trueType, tc.n.FalseExpr) - if cn == nil { - return nil, fmt.Errorf( - "true and false expression types must match; have %s and %s", - trueType.Printable(), falseType.Printable(), - ) - } - tc.n.FalseExpr = cn - falseType = trueType - } - } - - // Currently list and map types cannot be used, because we cannot - // generally assert that their element types are consistent. - // Such support might be added later, either by improving the type - // system or restricting usage to only variable and literal expressions, - // but for now this is simply prohibited because it doesn't seem to - // be a common enough case to be worth the complexity. - switch trueType { - case ast.TypeList: - return nil, fmt.Errorf( - "conditional operator cannot be used with list values", - ) - case ast.TypeMap: - return nil, fmt.Errorf( - "conditional operator cannot be used with map values", - ) - } - - // Result type (guaranteed to also match falseType due to the above) - if trueType == ast.TypeUnknown { - // falseType may also be unknown, but that's okay because two - // unknowns means our result is unknown anyway. 
- v.StackPush(falseType) - } else { - v.StackPush(trueType) - } - - return tc.n, nil -} - -type typeCheckOutput struct { - n *ast.Output -} - -func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) { - n := tc.n - types := make([]ast.Type, len(n.Exprs)) - for i, _ := range n.Exprs { - types[len(n.Exprs)-1-i] = v.StackPop() - } - - for _, ty := range types { - if ty == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - } - - // If there is only one argument and it is a list, we evaluate to a list - if len(types) == 1 { - switch t := types[0]; t { - case ast.TypeList: - fallthrough - case ast.TypeMap: - v.StackPush(t) - return n, nil - } - } - - // Otherwise, all concat args must be strings, so validate that - resultType := ast.TypeString - for i, t := range types { - - if t == ast.TypeUnknown { - resultType = ast.TypeUnknown - continue - } - - if t != ast.TypeString { - cn := v.ImplicitConversion(t, ast.TypeString, n.Exprs[i]) - if cn != nil { - n.Exprs[i] = cn - continue - } - - return nil, fmt.Errorf( - "output of an HIL expression must be a string, or a single list (argument %d is %s)", i+1, t) - } - } - - // This always results in type string, unless there are unknowns - v.StackPush(resultType) - - return n, nil -} - -type typeCheckLiteral struct { - n *ast.LiteralNode -} - -func (tc *typeCheckLiteral) TypeCheck(v *TypeCheck) (ast.Node, error) { - v.StackPush(tc.n.Typex) - return tc.n, nil -} - -type typeCheckVariableAccess struct { - n *ast.VariableAccess -} - -func (tc *typeCheckVariableAccess) TypeCheck(v *TypeCheck) (ast.Node, error) { - // Look up the variable in the map - variable, ok := v.Scope.LookupVar(tc.n.Name) - if !ok { - return nil, fmt.Errorf( - "unknown variable accessed: %s", tc.n.Name) - } - - // Add the type to the stack - v.StackPush(variable.Type) - - return tc.n, nil -} - -type typeCheckIndex struct { - n *ast.Index -} - -func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) { - keyType := v.StackPop() - targetType := v.StackPop() - - if keyType == ast.TypeUnknown || targetType == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - // Ensure we have a VariableAccess as the target - varAccessNode, ok := tc.n.Target.(*ast.VariableAccess) - if !ok { - return nil, fmt.Errorf( - "target of an index must be a VariableAccess node, was %T", tc.n.Target) - } - - // Get the variable - variable, ok := v.Scope.LookupVar(varAccessNode.Name) - if !ok { - return nil, fmt.Errorf( - "unknown variable accessed: %s", varAccessNode.Name) - } - - switch targetType { - case ast.TypeList: - if keyType != ast.TypeInt { - tc.n.Key = v.ImplicitConversion(keyType, ast.TypeInt, tc.n.Key) - if tc.n.Key == nil { - return nil, fmt.Errorf( - "key of an index must be an int, was %s", keyType) - } - } - - valType, err := ast.VariableListElementTypesAreHomogenous( - varAccessNode.Name, variable.Value.([]ast.Variable)) - if err != nil { - return tc.n, err - } - - v.StackPush(valType) - return tc.n, nil - case ast.TypeMap: - if keyType != ast.TypeString { - tc.n.Key = v.ImplicitConversion(keyType, ast.TypeString, tc.n.Key) - if tc.n.Key == nil { - return nil, fmt.Errorf( - "key of an index must be a string, was %s", keyType) - } - } - - valType, err := ast.VariableMapValueTypesAreHomogenous( - varAccessNode.Name, variable.Value.(map[string]ast.Variable)) - if err != nil { - return tc.n, err - } - - v.StackPush(valType) - return tc.n, nil - default: - return nil, fmt.Errorf("invalid index operation into non-indexable type: %s", 
variable.Type) - } -} - -func (v *TypeCheck) ImplicitConversion( - actual ast.Type, expected ast.Type, n ast.Node) ast.Node { - if v.Implicit == nil { - return nil - } - - fromMap, ok := v.Implicit[actual] - if !ok { - return nil - } - - toFunc, ok := fromMap[expected] - if !ok { - return nil - } - - return &ast.Call{ - Func: toFunc, - Args: []ast.Node{n}, - Posx: n.Pos(), - } -} - -func (v *TypeCheck) reset() { - v.Stack = nil - v.err = nil -} - -func (v *TypeCheck) StackPush(t ast.Type) { - v.Stack = append(v.Stack, t) -} - -func (v *TypeCheck) StackPop() ast.Type { - var x ast.Type - x, v.Stack = v.Stack[len(v.Stack)-1], v.Stack[:len(v.Stack)-1] - return x -} - -func (v *TypeCheck) StackPeek() ast.Type { - if len(v.Stack) == 0 { - return ast.TypeInvalid - } - - return v.Stack[len(v.Stack)-1] -} diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go deleted file mode 100644 index 184e029b..00000000 --- a/vendor/github.com/hashicorp/hil/convert.go +++ /dev/null @@ -1,174 +0,0 @@ -package hil - -import ( - "fmt" - "reflect" - - "github.com/hashicorp/hil/ast" - "github.com/mitchellh/mapstructure" -) - -// UnknownValue is a sentinel value that can be used to denote -// that a value of a variable (or map element, list element, etc.) -// is unknown. This will always have the type ast.TypeUnknown. -const UnknownValue = "74D93920-ED26-11E3-AC10-0800200C9A66" - -var hilMapstructureDecodeHookSlice []interface{} -var hilMapstructureDecodeHookStringSlice []string -var hilMapstructureDecodeHookMap map[string]interface{} - -// hilMapstructureWeakDecode behaves in the same way as mapstructure.WeakDecode -// but has a DecodeHook which defeats the backward compatibility mode of mapstructure -// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This -// allows us to use WeakDecode (desirable), but not fail on empty lists. 
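-//
-// Editor's note (not in the original source): with this hook in place,
-// decoding []interface{}{} into a map[string]interface{} target returns an
-// error instead of silently yielding an empty map, so InterfaceToVariable
-// below can attempt the map decode first and still fall through to the
-// list decode for list-shaped input.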
-func hilMapstructureWeakDecode(m interface{}, rawVal interface{}) error { - config := &mapstructure.DecoderConfig{ - DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) { - sliceType := reflect.TypeOf(hilMapstructureDecodeHookSlice) - stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice) - mapType := reflect.TypeOf(hilMapstructureDecodeHookMap) - - if (source == sliceType || source == stringSliceType) && target == mapType { - return nil, fmt.Errorf("Cannot convert %s into a %s", source, target) - } - - return val, nil - }, - WeaklyTypedInput: true, - Result: rawVal, - } - - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(m) -} - -func InterfaceToVariable(input interface{}) (ast.Variable, error) { - if iv, ok := input.(ast.Variable); ok { - return iv, nil - } - - // This is just to maintain backward compatibility - // after https://github.com/mitchellh/mapstructure/pull/98 - if v, ok := input.([]ast.Variable); ok { - return ast.Variable{ - Type: ast.TypeList, - Value: v, - }, nil - } - if v, ok := input.(map[string]ast.Variable); ok { - return ast.Variable{ - Type: ast.TypeMap, - Value: v, - }, nil - } - - var stringVal string - if err := hilMapstructureWeakDecode(input, &stringVal); err == nil { - // Special case the unknown value to turn into "unknown" - if stringVal == UnknownValue { - return ast.Variable{Value: UnknownValue, Type: ast.TypeUnknown}, nil - } - - // Otherwise return the string value - return ast.Variable{ - Type: ast.TypeString, - Value: stringVal, - }, nil - } - - var mapVal map[string]interface{} - if err := hilMapstructureWeakDecode(input, &mapVal); err == nil { - elements := make(map[string]ast.Variable) - for i, element := range mapVal { - varElement, err := InterfaceToVariable(element) - if err != nil { - return ast.Variable{}, err - } - elements[i] = varElement - } - - return ast.Variable{ - Type: ast.TypeMap, - Value: elements, - }, nil - } - - var sliceVal []interface{} - if err := hilMapstructureWeakDecode(input, &sliceVal); err == nil { - elements := make([]ast.Variable, len(sliceVal)) - for i, element := range sliceVal { - varElement, err := InterfaceToVariable(element) - if err != nil { - return ast.Variable{}, err - } - elements[i] = varElement - } - - return ast.Variable{ - Type: ast.TypeList, - Value: elements, - }, nil - } - - return ast.Variable{}, fmt.Errorf("value for conversion must be a string, interface{} or map[string]interface: got %T", input) -} - -func VariableToInterface(input ast.Variable) (interface{}, error) { - if input.Type == ast.TypeString { - if inputStr, ok := input.Value.(string); ok { - return inputStr, nil - } else { - return nil, fmt.Errorf("ast.Variable with type string has value which is not a string") - } - } - - if input.Type == ast.TypeList { - inputList, ok := input.Value.([]ast.Variable) - if !ok { - return nil, fmt.Errorf("ast.Variable with type list has value which is not a []ast.Variable") - } - - result := make([]interface{}, 0) - if len(inputList) == 0 { - return result, nil - } - - for _, element := range inputList { - if convertedElement, err := VariableToInterface(element); err == nil { - result = append(result, convertedElement) - } else { - return nil, err - } - } - - return result, nil - } - - if input.Type == ast.TypeMap { - inputMap, ok := input.Value.(map[string]ast.Variable) - if !ok { - return nil, fmt.Errorf("ast.Variable with type map has value which is not a map[string]ast.Variable") 
-	}
-
-	result := make(map[string]interface{}, 0)
-	if len(inputMap) == 0 {
-		return result, nil
-	}
-
-	for key, value := range inputMap {
-		if convertedValue, err := VariableToInterface(value); err == nil {
-			result[key] = convertedValue
-		} else {
-			return nil, err
-		}
-	}
-
-	return result, nil
-	}
-
-	return nil, fmt.Errorf("unknown input type: %s", input.Type)
-}
diff --git a/vendor/github.com/hashicorp/hil/eval.go b/vendor/github.com/hashicorp/hil/eval.go
deleted file mode 100644
index 27820769..00000000
--- a/vendor/github.com/hashicorp/hil/eval.go
+++ /dev/null
@@ -1,472 +0,0 @@
-package hil
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"sync"
-
-	"github.com/hashicorp/hil/ast"
-)
-
-// EvalConfig is the configuration for evaluating.
-type EvalConfig struct {
-	// GlobalScope is the global scope of execution for evaluation.
-	GlobalScope *ast.BasicScope
-
-	// SemanticChecks is a list of additional semantic checks that will be run
-	// on the tree prior to evaluating it. The type checker, identifier checker,
-	// etc. will be run before these automatically.
-	SemanticChecks []SemanticChecker
-}
-
-// SemanticChecker is the type that must be implemented to do a
-// semantic check on an AST tree. This will be called with the root node.
-type SemanticChecker func(ast.Node) error
-
-// EvaluationResult is a struct returned from the hil.Eval function,
-// representing the result of an interpolation. Results are returned in their
-// "natural" Go structure rather than in terms of the HIL AST. For the types
-// currently implemented, this means that the Value field can be interpreted as
-// the following Go types:
-//     TypeInvalid: undefined
-//     TypeString:  string
-//     TypeList:    []interface{}
-//     TypeMap:     map[string]interface{}
-//     TypeBool:    bool
-type EvaluationResult struct {
-	Type  EvalType
-	Value interface{}
-}
-
-// InvalidResult is a structure representing the result of a HIL interpolation
-// which has invalid syntax, missing variables, or some other type of error.
-// The error is described out of band in the accompanying error return value.
-var InvalidResult = EvaluationResult{Type: TypeInvalid, Value: nil}
-
-// errExitUnknown is an internal error that when returned means the result
-// is an unknown value. We use this for early exit.
-var errExitUnknown = errors.New("unknown value")
-
-func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) {
-	output, outputType, err := internalEval(root, config)
-	if err != nil {
-		return InvalidResult, err
-	}
-
-	// If the result contains any nested unknowns then the result as a whole
-	// is unknown, so that callers only have to deal with "entirely known"
-	// or "entirely unknown" as outcomes.
- if ast.IsUnknown(ast.Variable{Type: outputType, Value: output}) { - outputType = ast.TypeUnknown - output = UnknownValue - } - - switch outputType { - case ast.TypeList: - val, err := VariableToInterface(ast.Variable{ - Type: ast.TypeList, - Value: output, - }) - return EvaluationResult{ - Type: TypeList, - Value: val, - }, err - case ast.TypeMap: - val, err := VariableToInterface(ast.Variable{ - Type: ast.TypeMap, - Value: output, - }) - return EvaluationResult{ - Type: TypeMap, - Value: val, - }, err - case ast.TypeString: - return EvaluationResult{ - Type: TypeString, - Value: output, - }, nil - case ast.TypeBool: - return EvaluationResult{ - Type: TypeBool, - Value: output, - }, nil - case ast.TypeUnknown: - return EvaluationResult{ - Type: TypeUnknown, - Value: UnknownValue, - }, nil - default: - return InvalidResult, fmt.Errorf("unknown type %s as interpolation output", outputType) - } -} - -// Eval evaluates the given AST tree and returns its output value, the type -// of the output, and any error that occurred. -func internalEval(root ast.Node, config *EvalConfig) (interface{}, ast.Type, error) { - // Copy the scope so we can add our builtins - if config == nil { - config = new(EvalConfig) - } - scope := registerBuiltins(config.GlobalScope) - implicitMap := map[ast.Type]map[ast.Type]string{ - ast.TypeFloat: { - ast.TypeInt: "__builtin_FloatToInt", - ast.TypeString: "__builtin_FloatToString", - }, - ast.TypeInt: { - ast.TypeFloat: "__builtin_IntToFloat", - ast.TypeString: "__builtin_IntToString", - }, - ast.TypeString: { - ast.TypeInt: "__builtin_StringToInt", - ast.TypeFloat: "__builtin_StringToFloat", - ast.TypeBool: "__builtin_StringToBool", - }, - ast.TypeBool: { - ast.TypeString: "__builtin_BoolToString", - }, - } - - // Build our own semantic checks that we always run - tv := &TypeCheck{Scope: scope, Implicit: implicitMap} - ic := &IdentifierCheck{Scope: scope} - - // Build up the semantic checks for execution - checks := make( - []SemanticChecker, - len(config.SemanticChecks), - len(config.SemanticChecks)+2) - copy(checks, config.SemanticChecks) - checks = append(checks, ic.Visit) - checks = append(checks, tv.Visit) - - // Run the semantic checks - for _, check := range checks { - if err := check(root); err != nil { - return nil, ast.TypeInvalid, err - } - } - - // Execute - v := &evalVisitor{Scope: scope} - return v.Visit(root) -} - -// EvalNode is the interface that must be implemented by any ast.Node -// to support evaluation. This will be called in visitor pattern order. -// The result of each call to Eval is automatically pushed onto the -// stack as a LiteralNode. Pop elements off the stack to get child -// values. -type EvalNode interface { - Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) -} - -type evalVisitor struct { - Scope ast.Scope - Stack ast.Stack - - err error - lock sync.Mutex -} - -func (v *evalVisitor) Visit(root ast.Node) (interface{}, ast.Type, error) { - // Run the actual visitor pattern - root.Accept(v.visit) - - // Get our result and clear out everything else - var result *ast.LiteralNode - if v.Stack.Len() > 0 { - result = v.Stack.Pop().(*ast.LiteralNode) - } else { - result = new(ast.LiteralNode) - } - resultErr := v.err - if resultErr == errExitUnknown { - // This means the return value is unknown and we used the error - // as an early exit mechanism. Reset since the value on the stack - // should be the unknown value. 
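Taken together with Parse (defined later in this patch), Eval was the package's main entry point. A sketch of end-to-end use, relying only on the exported API shown in this file and assuming, as in upstream hil, that the builtin registration tolerates an empty EvalConfig; the exact output value is illustrative:

	package main

	import (
		"fmt"

		"github.com/hashicorp/hil"
	)

	func main() {
		// Parse an interpolation; arithmetic runs inside ${ ... },
		// with * binding tighter than + per the operator table below.
		tree, err := hil.Parse("result: ${1 + 2 * 3}")
		if err != nil {
			panic(err)
		}

		result, err := hil.Eval(tree, &hil.EvalConfig{})
		if err != nil {
			panic(err)
		}

		switch result.Type {
		case hil.TypeString:
			// Typically "result: 7": the int is implicitly converted
			// to a string at the output boundary.
			fmt.Println(result.Value.(string))
		case hil.TypeUnknown:
			fmt.Println("contains unknown values")
		}
	}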
-		resultErr = nil
-	}
-
-	// Clear everything else so we aren't just dangling
-	v.Stack.Reset()
-	v.err = nil
-
-	t, err := result.Type(v.Scope)
-	if err != nil {
-		return nil, ast.TypeInvalid, err
-	}
-
-	return result.Value, t, resultErr
-}
-
-func (v *evalVisitor) visit(raw ast.Node) ast.Node {
-	if v.err != nil {
-		return raw
-	}
-
-	en, err := evalNode(raw)
-	if err != nil {
-		v.err = err
-		return raw
-	}
-
-	out, outType, err := en.Eval(v.Scope, &v.Stack)
-	if err != nil {
-		v.err = err
-		return raw
-	}
-
-	v.Stack.Push(&ast.LiteralNode{
-		Value: out,
-		Typex: outType,
-	})
-
-	if outType == ast.TypeUnknown {
-		// Halt immediately
-		v.err = errExitUnknown
-		return raw
-	}
-
-	return raw
-}
-
-// evalNode is a private function that returns an EvalNode for built-in
-// types as well as any other EvalNode implementations.
-func evalNode(raw ast.Node) (EvalNode, error) {
-	switch n := raw.(type) {
-	case *ast.Index:
-		return &evalIndex{n}, nil
-	case *ast.Call:
-		return &evalCall{n}, nil
-	case *ast.Conditional:
-		return &evalConditional{n}, nil
-	case *ast.Output:
-		return &evalOutput{n}, nil
-	case *ast.LiteralNode:
-		return &evalLiteralNode{n}, nil
-	case *ast.VariableAccess:
-		return &evalVariableAccess{n}, nil
-	default:
-		en, ok := n.(EvalNode)
-		if !ok {
-			return nil, fmt.Errorf("node doesn't support evaluation: %#v", raw)
-		}
-
-		return en, nil
-	}
-}
-
-type evalCall struct{ *ast.Call }
-
-func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
-	// Look up the function in the map
-	function, ok := s.LookupFunc(v.Func)
-	if !ok {
-		return nil, ast.TypeInvalid, fmt.Errorf(
-			"unknown function called: %s", v.Func)
-	}
-
-	// The arguments are on the stack in reverse order, so pop them off.
-	args := make([]interface{}, len(v.Args))
-	for i := range v.Args {
-		node := stack.Pop().(*ast.LiteralNode)
-		if node.IsUnknown() {
-			// If any arguments are unknown then the result is automatically unknown
-			return UnknownValue, ast.TypeUnknown, nil
-		}
-		args[len(v.Args)-1-i] = node.Value
-	}
-
-	// Call the function
-	result, err := function.Callback(args)
-	if err != nil {
-		return nil, ast.TypeInvalid, fmt.Errorf("%s: %s", v.Func, err)
-	}
-
-	return result, function.ReturnType, nil
-}
-
-type evalConditional struct{ *ast.Conditional }
-
-func (v *evalConditional) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
-	// On the stack we have literal nodes representing the resulting values
-	// of the condition, true and false expressions, but they are in reverse
-	// order.
-	falseLit := stack.Pop().(*ast.LiteralNode)
-	trueLit := stack.Pop().(*ast.LiteralNode)
-	condLit := stack.Pop().(*ast.LiteralNode)
-
-	if condLit.IsUnknown() {
-		// If our conditional is unknown then our result is also unknown
-		return UnknownValue, ast.TypeUnknown, nil
-	}
-
-	if condLit.Value.(bool) {
-		return trueLit.Value, trueLit.Typex, nil
-	} else {
-		return falseLit.Value, falseLit.Typex, nil
-	}
-}
-
-type evalIndex struct{ *ast.Index }
-
-func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
-	key := stack.Pop().(*ast.LiteralNode)
-	target := stack.Pop().(*ast.LiteralNode)
-
-	variableName := v.Index.Target.(*ast.VariableAccess).Name
-
-	if key.IsUnknown() {
-		// If our key is unknown then our result is also unknown
-		return UnknownValue, ast.TypeUnknown, nil
-	}
-
-	// For target, we'll accept collections containing unknown values but
-	// we still need to catch when the collection itself is unknown, shallowly.
- if target.Typex == ast.TypeUnknown { - return UnknownValue, ast.TypeUnknown, nil - } - - switch target.Typex { - case ast.TypeList: - return v.evalListIndex(variableName, target.Value, key.Value) - case ast.TypeMap: - return v.evalMapIndex(variableName, target.Value, key.Value) - default: - return nil, ast.TypeInvalid, fmt.Errorf( - "target %q for indexing must be ast.TypeList or ast.TypeMap, is %s", - variableName, target.Typex) - } -} - -func (v *evalIndex) evalListIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) { - // We assume type checking was already done and we can assume that target - // is a list and key is an int - list, ok := target.([]ast.Variable) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast target to []Variable, is: %T", target) - } - - keyInt, ok := key.(int) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast key to int, is: %T", key) - } - - if len(list) == 0 { - return nil, ast.TypeInvalid, fmt.Errorf("list is empty") - } - - if keyInt < 0 || len(list) < keyInt+1 { - return nil, ast.TypeInvalid, fmt.Errorf( - "index %d out of range for list %s (max %d)", - keyInt, variableName, len(list)) - } - - returnVal := list[keyInt].Value - returnType := list[keyInt].Type - return returnVal, returnType, nil -} - -func (v *evalIndex) evalMapIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) { - // We assume type checking was already done and we can assume that target - // is a map and key is a string - vmap, ok := target.(map[string]ast.Variable) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast target to map[string]Variable, is: %T", target) - } - - keyString, ok := key.(string) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast key to string, is: %T", key) - } - - if len(vmap) == 0 { - return nil, ast.TypeInvalid, fmt.Errorf("map is empty") - } - - value, ok := vmap[keyString] - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "key %q does not exist in map %s", keyString, variableName) - } - - return value.Value, value.Type, nil -} - -type evalOutput struct{ *ast.Output } - -func (v *evalOutput) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { - // The expressions should all be on the stack in reverse - // order. So pop them off, reverse their order, and concatenate. - nodes := make([]*ast.LiteralNode, 0, len(v.Exprs)) - haveUnknown := false - for range v.Exprs { - n := stack.Pop().(*ast.LiteralNode) - nodes = append(nodes, n) - - // If we have any unknowns then the whole result is unknown - // (we must deal with this first, because the type checker can - // skip type conversions in the presence of unknowns, and thus - // any of our other nodes may be incorrectly typed.) 
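A sketch of the list/map indexing behaviour implemented above, assuming ast.BasicScope exposes a VarMap field as in the upstream hil/ast package (that file is not part of this diff):

	package main

	import (
		"fmt"

		"github.com/hashicorp/hil"
		"github.com/hashicorp/hil/ast"
	)

	func main() {
		scope := &ast.BasicScope{
			VarMap: map[string]ast.Variable{
				"ports": {
					Type: ast.TypeList,
					Value: []ast.Variable{
						{Type: ast.TypeString, Value: "80"},
						{Type: ast.TypeString, Value: "443"},
					},
				},
			},
		}

		tree, err := hil.Parse("${ports[1]}")
		if err != nil {
			panic(err)
		}

		// An out-of-range key would surface as an error from Eval,
		// produced by evalListIndex above.
		result, err := hil.Eval(tree, &hil.EvalConfig{GlobalScope: scope})
		if err != nil {
			panic(err)
		}
		fmt.Println(result.Value) // "443"
	}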
- if n.IsUnknown() { - haveUnknown = true - } - } - - if haveUnknown { - return UnknownValue, ast.TypeUnknown, nil - } - - // Special case the single list and map - if len(nodes) == 1 { - switch t := nodes[0].Typex; t { - case ast.TypeList: - fallthrough - case ast.TypeMap: - fallthrough - case ast.TypeUnknown: - return nodes[0].Value, t, nil - } - } - - // Otherwise concatenate the strings - var buf bytes.Buffer - for i := len(nodes) - 1; i >= 0; i-- { - if nodes[i].Typex != ast.TypeString { - return nil, ast.TypeInvalid, fmt.Errorf( - "invalid output with %s value at index %d: %#v", - nodes[i].Typex, - i, - nodes[i].Value, - ) - } - buf.WriteString(nodes[i].Value.(string)) - } - - return buf.String(), ast.TypeString, nil -} - -type evalLiteralNode struct{ *ast.LiteralNode } - -func (v *evalLiteralNode) Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) { - return v.Value, v.Typex, nil -} - -type evalVariableAccess struct{ *ast.VariableAccess } - -func (v *evalVariableAccess) Eval(scope ast.Scope, _ *ast.Stack) (interface{}, ast.Type, error) { - // Look up the variable in the map - variable, ok := scope.LookupVar(v.Name) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "unknown variable accessed: %s", v.Name) - } - - return variable.Value, variable.Type, nil -} diff --git a/vendor/github.com/hashicorp/hil/eval_type.go b/vendor/github.com/hashicorp/hil/eval_type.go deleted file mode 100644 index 6946ecd2..00000000 --- a/vendor/github.com/hashicorp/hil/eval_type.go +++ /dev/null @@ -1,16 +0,0 @@ -package hil - -//go:generate stringer -type=EvalType eval_type.go - -// EvalType represents the type of the output returned from a HIL -// evaluation. -type EvalType uint32 - -const ( - TypeInvalid EvalType = 0 - TypeString EvalType = 1 << iota - TypeBool - TypeList - TypeMap - TypeUnknown -) diff --git a/vendor/github.com/hashicorp/hil/evaltype_string.go b/vendor/github.com/hashicorp/hil/evaltype_string.go deleted file mode 100644 index b107ddd4..00000000 --- a/vendor/github.com/hashicorp/hil/evaltype_string.go +++ /dev/null @@ -1,42 +0,0 @@ -// Code generated by "stringer -type=EvalType eval_type.go"; DO NOT EDIT - -package hil - -import "fmt" - -const ( - _EvalType_name_0 = "TypeInvalid" - _EvalType_name_1 = "TypeString" - _EvalType_name_2 = "TypeBool" - _EvalType_name_3 = "TypeList" - _EvalType_name_4 = "TypeMap" - _EvalType_name_5 = "TypeUnknown" -) - -var ( - _EvalType_index_0 = [...]uint8{0, 11} - _EvalType_index_1 = [...]uint8{0, 10} - _EvalType_index_2 = [...]uint8{0, 8} - _EvalType_index_3 = [...]uint8{0, 8} - _EvalType_index_4 = [...]uint8{0, 7} - _EvalType_index_5 = [...]uint8{0, 11} -) - -func (i EvalType) String() string { - switch { - case i == 0: - return _EvalType_name_0 - case i == 2: - return _EvalType_name_1 - case i == 4: - return _EvalType_name_2 - case i == 8: - return _EvalType_name_3 - case i == 16: - return _EvalType_name_4 - case i == 32: - return _EvalType_name_5 - default: - return fmt.Sprintf("EvalType(%d)", i) - } -} diff --git a/vendor/github.com/hashicorp/hil/parse.go b/vendor/github.com/hashicorp/hil/parse.go deleted file mode 100644 index ecbe1fdb..00000000 --- a/vendor/github.com/hashicorp/hil/parse.go +++ /dev/null @@ -1,29 +0,0 @@ -package hil - -import ( - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/parser" - "github.com/hashicorp/hil/scanner" -) - -// Parse parses the given program and returns an executable AST tree. 
-// -// Syntax errors are returned with error having the dynamic type -// *parser.ParseError, which gives the caller access to the source position -// where the error was found, which allows (for example) combining it with -// a known source filename to add context to the error message. -func Parse(v string) (ast.Node, error) { - return ParseWithPosition(v, ast.Pos{Line: 1, Column: 1}) -} - -// ParseWithPosition is like Parse except that it overrides the source -// row and column position of the first character in the string, which should -// be 1-based. -// -// This can be used when HIL is embedded in another language and the outer -// parser knows the row and column where the HIL expression started within -// the overall source file. -func ParseWithPosition(v string, pos ast.Pos) (ast.Node, error) { - ch := scanner.Scan(v, pos) - return parser.Parse(ch) -} diff --git a/vendor/github.com/hashicorp/hil/parser/binary_op.go b/vendor/github.com/hashicorp/hil/parser/binary_op.go deleted file mode 100644 index 2e013e01..00000000 --- a/vendor/github.com/hashicorp/hil/parser/binary_op.go +++ /dev/null @@ -1,45 +0,0 @@ -package parser - -import ( - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -var binaryOps []map[scanner.TokenType]ast.ArithmeticOp - -func init() { - // This operation table maps from the operator's scanner token type - // to the AST arithmetic operation. All expressions produced from - // binary operators are *ast.Arithmetic nodes. - // - // Binary operator groups are listed in order of precedence, with - // the *lowest* precedence first. Operators within the same group - // have left-to-right associativity. - binaryOps = []map[scanner.TokenType]ast.ArithmeticOp{ - { - scanner.OR: ast.ArithmeticOpLogicalOr, - }, - { - scanner.AND: ast.ArithmeticOpLogicalAnd, - }, - { - scanner.EQUAL: ast.ArithmeticOpEqual, - scanner.NOTEQUAL: ast.ArithmeticOpNotEqual, - }, - { - scanner.GT: ast.ArithmeticOpGreaterThan, - scanner.GTE: ast.ArithmeticOpGreaterThanOrEqual, - scanner.LT: ast.ArithmeticOpLessThan, - scanner.LTE: ast.ArithmeticOpLessThanOrEqual, - }, - { - scanner.PLUS: ast.ArithmeticOpAdd, - scanner.MINUS: ast.ArithmeticOpSub, - }, - { - scanner.STAR: ast.ArithmeticOpMul, - scanner.SLASH: ast.ArithmeticOpDiv, - scanner.PERCENT: ast.ArithmeticOpMod, - }, - } -} diff --git a/vendor/github.com/hashicorp/hil/parser/error.go b/vendor/github.com/hashicorp/hil/parser/error.go deleted file mode 100644 index bacd6964..00000000 --- a/vendor/github.com/hashicorp/hil/parser/error.go +++ /dev/null @@ -1,38 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -type ParseError struct { - Message string - Pos ast.Pos -} - -func Errorf(pos ast.Pos, format string, args ...interface{}) error { - return &ParseError{ - Message: fmt.Sprintf(format, args...), - Pos: pos, - } -} - -// TokenErrorf is a convenient wrapper around Errorf that uses the -// position of the given token. -func TokenErrorf(token *scanner.Token, format string, args ...interface{}) error { - return Errorf(token.Pos, format, args...) 
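A sketch of position-aware parsing and the *parser.ParseError contract described above; the file name and positions are hypothetical:

	package main

	import (
		"fmt"

		"github.com/hashicorp/hil"
		"github.com/hashicorp/hil/ast"
		"github.com/hashicorp/hil/parser"
	)

	func main() {
		// Pretend this expression started at line 12, column 30 of config.tf.
		_, err := hil.ParseWithPosition("${foo(}", ast.Pos{Line: 12, Column: 30})
		if err != nil {
			// Syntax errors carry the source position of the offending token.
			if perr, ok := err.(*parser.ParseError); ok {
				fmt.Printf("config.tf: %s at %s\n", perr.Message, perr.Pos)
			}
		}
	}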
-} - -func ExpectationError(wanted string, got *scanner.Token) error { - return TokenErrorf(got, "expected %s but found %s", wanted, got) -} - -func (e *ParseError) Error() string { - return fmt.Sprintf("parse error at %s: %s", e.Pos, e.Message) -} - -func (e *ParseError) String() string { - return e.Error() -} diff --git a/vendor/github.com/hashicorp/hil/parser/fuzz.go b/vendor/github.com/hashicorp/hil/parser/fuzz.go deleted file mode 100644 index de954f38..00000000 --- a/vendor/github.com/hashicorp/hil/parser/fuzz.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build gofuzz - -package parser - -import ( - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -// This is a fuzz testing function designed to be used with go-fuzz: -// https://github.com/dvyukov/go-fuzz -// -// It's not included in a normal build due to the gofuzz build tag above. -// -// There are some input files that you can use as a seed corpus for go-fuzz -// in the directory ./fuzz-corpus . - -func Fuzz(data []byte) int { - str := string(data) - - ch := scanner.Scan(str, ast.Pos{Line: 1, Column: 1}) - _, err := Parse(ch) - if err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/hashicorp/hil/parser/parser.go b/vendor/github.com/hashicorp/hil/parser/parser.go deleted file mode 100644 index 376f1c49..00000000 --- a/vendor/github.com/hashicorp/hil/parser/parser.go +++ /dev/null @@ -1,522 +0,0 @@ -package parser - -import ( - "strconv" - "unicode/utf8" - - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -func Parse(ch <-chan *scanner.Token) (ast.Node, error) { - peeker := scanner.NewPeeker(ch) - parser := &parser{peeker} - output, err := parser.ParseTopLevel() - peeker.Close() - return output, err -} - -type parser struct { - peeker *scanner.Peeker -} - -func (p *parser) ParseTopLevel() (ast.Node, error) { - return p.parseInterpolationSeq(false) -} - -func (p *parser) ParseQuoted() (ast.Node, error) { - return p.parseInterpolationSeq(true) -} - -// parseInterpolationSeq parses either the top-level sequence of literals -// and interpolation expressions or a similar sequence within a quoted -// string inside an interpolation expression. The latter case is requested -// by setting 'quoted' to true. -func (p *parser) parseInterpolationSeq(quoted bool) (ast.Node, error) { - literalType := scanner.LITERAL - endType := scanner.EOF - if quoted { - // exceptions for quoted sequences - literalType = scanner.STRING - endType = scanner.CQUOTE - } - - startPos := p.peeker.Peek().Pos - - if quoted { - tok := p.peeker.Read() - if tok.Type != scanner.OQUOTE { - return nil, ExpectationError("open quote", tok) - } - } - - var exprs []ast.Node - for { - tok := p.peeker.Read() - - if tok.Type == endType { - break - } - - switch tok.Type { - case literalType: - val, err := p.parseStringToken(tok) - if err != nil { - return nil, err - } - exprs = append(exprs, &ast.LiteralNode{ - Value: val, - Typex: ast.TypeString, - Posx: tok.Pos, - }) - case scanner.BEGIN: - expr, err := p.ParseInterpolation() - if err != nil { - return nil, err - } - exprs = append(exprs, expr) - default: - return nil, ExpectationError(`"${"`, tok) - } - } - - if len(exprs) == 0 { - // If we have no parts at all then the input must've - // been an empty string. 
- exprs = append(exprs, &ast.LiteralNode{ - Value: "", - Typex: ast.TypeString, - Posx: startPos, - }) - } - - // As a special case, if our "Output" contains only one expression - // and it's a literal string then we'll hoist it up to be our - // direct return value, so callers can easily recognize a string - // that has no interpolations at all. - if len(exprs) == 1 { - if lit, ok := exprs[0].(*ast.LiteralNode); ok { - if lit.Typex == ast.TypeString { - return lit, nil - } - } - } - - return &ast.Output{ - Exprs: exprs, - Posx: startPos, - }, nil -} - -// parseStringToken takes a token of either LITERAL or STRING type and -// returns the interpreted string, after processing any relevant -// escape sequences. -func (p *parser) parseStringToken(tok *scanner.Token) (string, error) { - var backslashes bool - switch tok.Type { - case scanner.LITERAL: - backslashes = false - case scanner.STRING: - backslashes = true - default: - panic("unsupported string token type") - } - - raw := []byte(tok.Content) - buf := make([]byte, 0, len(raw)) - - for i := 0; i < len(raw); i++ { - b := raw[i] - more := len(raw) > (i + 1) - - if b == '$' { - if more && raw[i+1] == '$' { - // skip over the second dollar sign - i++ - } - } else if backslashes && b == '\\' { - if !more { - return "", Errorf( - ast.Pos{ - Column: tok.Pos.Column + utf8.RuneCount(raw[:i]), - Line: tok.Pos.Line, - }, - `unfinished backslash escape sequence`, - ) - } - escapeType := raw[i+1] - switch escapeType { - case '\\': - // skip over the second slash - i++ - case 'n': - b = '\n' - i++ - case '"': - b = '"' - i++ - default: - return "", Errorf( - ast.Pos{ - Column: tok.Pos.Column + utf8.RuneCount(raw[:i]), - Line: tok.Pos.Line, - }, - `invalid backslash escape sequence`, - ) - } - } - - buf = append(buf, b) - } - - return string(buf), nil -} - -func (p *parser) ParseInterpolation() (ast.Node, error) { - // By the time we're called, we're already "inside" the ${ sequence - // because the caller consumed the ${ token. - - expr, err := p.ParseExpression() - if err != nil { - return nil, err - } - - err = p.requireTokenType(scanner.END, `"}"`) - if err != nil { - return nil, err - } - - return expr, nil -} - -func (p *parser) ParseExpression() (ast.Node, error) { - return p.parseTernaryCond() -} - -func (p *parser) parseTernaryCond() (ast.Node, error) { - // The ternary condition operator (.. ? .. : ..) behaves somewhat - // like a binary operator except that the "operator" is itself - // an expression enclosed in two punctuation characters. - // The middle expression is parsed as if the ? and : symbols - // were parentheses. The "rhs" (the "false expression") is then - // treated right-associatively so it behaves similarly to the - // middle in terms of precedence. 
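The escape handling implemented by parseStringToken above is easiest to see from the outside. A sketch, with expected outputs following the rules in that function ($$ collapses to $ outside quotes; backslash escapes apply only inside quoted strings):

	package main

	import (
		"fmt"

		"github.com/hashicorp/hil"
		"github.com/hashicorp/hil/ast"
	)

	func main() {
		// $$ collapses to a single $ and suppresses interpolation, so this
		// parses to a plain literal (hoisted per the special case above).
		tree, err := hil.Parse("cost: $${dollars}")
		if err != nil {
			panic(err)
		}
		fmt.Println(tree.(*ast.LiteralNode).Value) // cost: ${dollars}

		// Inside a quoted string within ${ ... }, \" yields a literal quote,
		// \n a newline, and \\ a backslash.
		result, err := hil.Eval(mustParse(`${"a \"quoted\" word"}`), &hil.EvalConfig{})
		if err != nil {
			panic(err)
		}
		fmt.Println(result.Value) // a "quoted" word
	}

	// mustParse is a tiny helper for the example.
	func mustParse(s string) ast.Node {
		n, err := hil.Parse(s)
		if err != nil {
			panic(err)
		}
		return n
	}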
- - startPos := p.peeker.Peek().Pos - - var cond, trueExpr, falseExpr ast.Node - var err error - - cond, err = p.parseBinaryOps(binaryOps) - if err != nil { - return nil, err - } - - next := p.peeker.Peek() - if next.Type != scanner.QUESTION { - return cond, nil - } - - p.peeker.Read() // eat question mark - - trueExpr, err = p.ParseExpression() - if err != nil { - return nil, err - } - - colon := p.peeker.Read() - if colon.Type != scanner.COLON { - return nil, ExpectationError(":", colon) - } - - falseExpr, err = p.ParseExpression() - if err != nil { - return nil, err - } - - return &ast.Conditional{ - CondExpr: cond, - TrueExpr: trueExpr, - FalseExpr: falseExpr, - Posx: startPos, - }, nil -} - -// parseBinaryOps calls itself recursively to work through all of the -// operator precedence groups, and then eventually calls ParseExpressionTerm -// for each operand. -func (p *parser) parseBinaryOps(ops []map[scanner.TokenType]ast.ArithmeticOp) (ast.Node, error) { - if len(ops) == 0 { - // We've run out of operators, so now we'll just try to parse a term. - return p.ParseExpressionTerm() - } - - thisLevel := ops[0] - remaining := ops[1:] - - startPos := p.peeker.Peek().Pos - - var lhs, rhs ast.Node - operator := ast.ArithmeticOpInvalid - var err error - - // parse a term that might be the first operand of a binary - // expression or it might just be a standalone term, but - // we won't know until we've parsed it and can look ahead - // to see if there's an operator token. - lhs, err = p.parseBinaryOps(remaining) - if err != nil { - return nil, err - } - - // We'll keep eating up arithmetic operators until we run - // out, so that operators with the same precedence will combine in a - // left-associative manner: - // a+b+c => (a+b)+c, not a+(b+c) - // - // Should we later want to have right-associative operators, a way - // to achieve that would be to call back up to ParseExpression here - // instead of iteratively parsing only the remaining operators. - for { - next := p.peeker.Peek() - var newOperator ast.ArithmeticOp - var ok bool - if newOperator, ok = thisLevel[next.Type]; !ok { - break - } - - // Are we extending an expression started on - // the previous iteration? 
- if operator != ast.ArithmeticOpInvalid { - lhs = &ast.Arithmetic{ - Op: operator, - Exprs: []ast.Node{lhs, rhs}, - Posx: startPos, - } - } - - operator = newOperator - p.peeker.Read() // eat operator token - rhs, err = p.parseBinaryOps(remaining) - if err != nil { - return nil, err - } - } - - if operator != ast.ArithmeticOpInvalid { - return &ast.Arithmetic{ - Op: operator, - Exprs: []ast.Node{lhs, rhs}, - Posx: startPos, - }, nil - } else { - return lhs, nil - } -} - -func (p *parser) ParseExpressionTerm() (ast.Node, error) { - - next := p.peeker.Peek() - - switch next.Type { - - case scanner.OPAREN: - p.peeker.Read() - expr, err := p.ParseExpression() - if err != nil { - return nil, err - } - err = p.requireTokenType(scanner.CPAREN, `")"`) - return expr, err - - case scanner.OQUOTE: - return p.ParseQuoted() - - case scanner.INTEGER: - tok := p.peeker.Read() - val, err := strconv.Atoi(tok.Content) - if err != nil { - return nil, TokenErrorf(tok, "invalid integer: %s", err) - } - return &ast.LiteralNode{ - Value: val, - Typex: ast.TypeInt, - Posx: tok.Pos, - }, nil - - case scanner.FLOAT: - tok := p.peeker.Read() - val, err := strconv.ParseFloat(tok.Content, 64) - if err != nil { - return nil, TokenErrorf(tok, "invalid float: %s", err) - } - return &ast.LiteralNode{ - Value: val, - Typex: ast.TypeFloat, - Posx: tok.Pos, - }, nil - - case scanner.BOOL: - tok := p.peeker.Read() - // the scanner guarantees that tok.Content is either "true" or "false" - var val bool - if tok.Content[0] == 't' { - val = true - } else { - val = false - } - return &ast.LiteralNode{ - Value: val, - Typex: ast.TypeBool, - Posx: tok.Pos, - }, nil - - case scanner.MINUS: - opTok := p.peeker.Read() - // important to use ParseExpressionTerm rather than ParseExpression - // here, otherwise we can capture a following binary expression into - // our negation. - // e.g. -46+5 should parse as (0-46)+5, not 0-(46+5) - operand, err := p.ParseExpressionTerm() - if err != nil { - return nil, err - } - // The AST currently represents negative numbers as - // a binary subtraction of the number from zero. - return &ast.Arithmetic{ - Op: ast.ArithmeticOpSub, - Exprs: []ast.Node{ - &ast.LiteralNode{ - Value: 0, - Typex: ast.TypeInt, - Posx: opTok.Pos, - }, - operand, - }, - Posx: opTok.Pos, - }, nil - - case scanner.BANG: - opTok := p.peeker.Read() - // important to use ParseExpressionTerm rather than ParseExpression - // here, otherwise we can capture a following binary expression into - // our negation. - operand, err := p.ParseExpressionTerm() - if err != nil { - return nil, err - } - // The AST currently represents binary negation as an equality - // test with "false". - return &ast.Arithmetic{ - Op: ast.ArithmeticOpEqual, - Exprs: []ast.Node{ - &ast.LiteralNode{ - Value: false, - Typex: ast.TypeBool, - Posx: opTok.Pos, - }, - operand, - }, - Posx: opTok.Pos, - }, nil - - case scanner.IDENTIFIER: - return p.ParseScopeInteraction() - - default: - return nil, ExpectationError("expression", next) - } -} - -// ParseScopeInteraction parses the expression types that interact -// with the evaluation scope: variable access, function calls, and -// indexing. -// -// Indexing should actually be a distinct operator in its own right, -// so that e.g. it can be applied to the result of a function call, -// but for now we're preserving the behavior of the older yacc-based -// parser. 
-func (p *parser) ParseScopeInteraction() (ast.Node, error) { - first := p.peeker.Read() - startPos := first.Pos - if first.Type != scanner.IDENTIFIER { - return nil, ExpectationError("identifier", first) - } - - next := p.peeker.Peek() - if next.Type == scanner.OPAREN { - // function call - funcName := first.Content - p.peeker.Read() // eat paren - var args []ast.Node - - for { - if p.peeker.Peek().Type == scanner.CPAREN { - break - } - - arg, err := p.ParseExpression() - if err != nil { - return nil, err - } - - args = append(args, arg) - - if p.peeker.Peek().Type == scanner.COMMA { - p.peeker.Read() // eat comma - continue - } else { - break - } - } - - err := p.requireTokenType(scanner.CPAREN, `")"`) - if err != nil { - return nil, err - } - - return &ast.Call{ - Func: funcName, - Args: args, - Posx: startPos, - }, nil - } - - varNode := &ast.VariableAccess{ - Name: first.Content, - Posx: startPos, - } - - if p.peeker.Peek().Type == scanner.OBRACKET { - // index operator - startPos := p.peeker.Read().Pos // eat bracket - indexExpr, err := p.ParseExpression() - if err != nil { - return nil, err - } - err = p.requireTokenType(scanner.CBRACKET, `"]"`) - if err != nil { - return nil, err - } - return &ast.Index{ - Target: varNode, - Key: indexExpr, - Posx: startPos, - }, nil - } - - return varNode, nil -} - -// requireTokenType consumes the next token an returns an error if its -// type does not match the given type. nil is returned if the type matches. -// -// This is a helper around peeker.Read() for situations where the parser just -// wants to assert that a particular token type must be present. -func (p *parser) requireTokenType(wantType scanner.TokenType, wantName string) error { - token := p.peeker.Read() - if token.Type != wantType { - return ExpectationError(wantName, token) - } - return nil -} diff --git a/vendor/github.com/hashicorp/hil/scanner/peeker.go b/vendor/github.com/hashicorp/hil/scanner/peeker.go deleted file mode 100644 index 4de37283..00000000 --- a/vendor/github.com/hashicorp/hil/scanner/peeker.go +++ /dev/null @@ -1,55 +0,0 @@ -package scanner - -// Peeker is a utility that wraps a token channel returned by Scan and -// provides an interface that allows a caller (e.g. the parser) to -// work with the token stream in a mode that allows one token of lookahead, -// and provides utilities for more convenient processing of the stream. -type Peeker struct { - ch <-chan *Token - peeked *Token -} - -func NewPeeker(ch <-chan *Token) *Peeker { - return &Peeker{ - ch: ch, - } -} - -// Peek returns the next token in the stream without consuming it. A -// subsequent call to Read will return the same token. -func (p *Peeker) Peek() *Token { - if p.peeked == nil { - p.peeked = <-p.ch - } - return p.peeked -} - -// Read consumes the next token in the stream and returns it. -func (p *Peeker) Read() *Token { - token := p.Peek() - - // As a special case, we will produce the EOF token forever once - // it is reached. - if token.Type != EOF { - p.peeked = nil - } - - return token -} - -// Close ensures that the token stream has been exhausted, to prevent -// the goroutine in the underlying scanner from leaking. -// -// It's not necessary to call this if the caller reads the token stream -// to EOF, since that implicitly closes the scanner. -func (p *Peeker) Close() { - for _ = range p.ch { - // discard - } - // Install a synthetic EOF token in 'peeked' in case someone - // erroneously calls Peek() or Read() after we've closed. 
-	p.peeked = &Token{
-		Type:    EOF,
-		Content: "",
-	}
-}
diff --git a/vendor/github.com/hashicorp/hil/scanner/scanner.go b/vendor/github.com/hashicorp/hil/scanner/scanner.go
deleted file mode 100644
index 86085de0..00000000
--- a/vendor/github.com/hashicorp/hil/scanner/scanner.go
+++ /dev/null
@@ -1,556 +0,0 @@
-package scanner
-
-import (
-	"unicode"
-	"unicode/utf8"
-
-	"github.com/hashicorp/hil/ast"
-)
-
-// Scan returns a channel that receives Tokens from the given input string.
-//
-// The scanner's job is just to partition the string into meaningful parts.
-// It doesn't do any transformation of the raw input string, so the caller
-// must deal with any further interpretation required, such as parsing INTEGER
-// tokens into real ints, or dealing with escape sequences in LITERAL or
-// STRING tokens.
-//
-// Strings in the returned tokens are slices from the original string.
-//
-// startPos should be set to ast.InitPos unless the caller knows that
-// this interpolation string is part of a larger file and knows the position
-// of the first character in that larger file.
-func Scan(s string, startPos ast.Pos) <-chan *Token {
-	ch := make(chan *Token)
-	go scan(s, ch, startPos)
-	return ch
-}
-
-func scan(s string, ch chan<- *Token, pos ast.Pos) {
-	// 'remain' starts off as the whole string but we gradually
-	// slice off the front of it as we work our way through.
-	remain := s
-
-	// nesting keeps track of how many ${ .. } sequences we are
-	// inside, so we can recognize the minor differences in syntax
-	// between outer string literals (LITERAL tokens) and quoted
-	// string literals (STRING tokens).
-	nesting := 0
-
-	// We're going to flip back and forth between parsing literals/strings
-	// and parsing interpolation sequences ${ .. } until we reach EOF or
-	// some INVALID token.
-All:
-	for {
-		startPos := pos
-		// Literal string processing first, since the beginning of
-		// a string is always outside of an interpolation sequence.
-		literalVal, terminator := scanLiteral(remain, pos, nesting > 0)
-
-		if len(literalVal) > 0 {
-			litType := LITERAL
-			if nesting > 0 {
-				litType = STRING
-			}
-			ch <- &Token{
-				Type:    litType,
-				Content: literalVal,
-				Pos:     startPos,
-			}
-			remain = remain[len(literalVal):]
-		}
-
-		ch <- terminator
-		remain = remain[len(terminator.Content):]
-		pos = terminator.Pos
-		// Safe to use len() here because none of the terminator tokens
-		// can contain UTF-8 sequences.
-		pos.Column = pos.Column + len(terminator.Content)
-
-		switch terminator.Type {
-		case INVALID:
-			// Synthetic EOF after invalid token, since further scanning
-			// is likely to just produce more garbage.
-			ch <- &Token{
-				Type:    EOF,
-				Content: "",
-				Pos:     pos,
-			}
-			break All
-		case EOF:
-			// All done!
-			break All
-		case BEGIN:
-			nesting++
-		case CQUOTE:
-			// nothing special to do
-		default:
-			// Should never happen
-			panic("invalid string/literal terminator")
-		}
-
-		// Now we do the processing of the insides of ${ .. } sequences.
-		// This loop terminates when we encounter either a closing } or
-		// an opening ", which will cause us to return to literal processing.
-	Interpolation:
-		for {
-
-			token, size, newPos := scanInterpolationToken(remain, pos)
-			ch <- token
-			remain = remain[size:]
-			pos = newPos
-
-			switch token.Type {
-			case INVALID:
-				// Synthetic EOF after invalid token, since further scanning
-				// is likely to just produce more garbage.
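A sketch of driving the token stream directly, combining Scan with the Peeker shown just above; this uses only the API visible in this patch and is mainly useful for debugging:

	package main

	import (
		"fmt"

		"github.com/hashicorp/hil/ast"
		"github.com/hashicorp/hil/scanner"
	)

	func main() {
		ch := scanner.Scan(`hello ${upper(name)}`, ast.Pos{Line: 1, Column: 1})
		peeker := scanner.NewPeeker(ch)

		// Peek never consumes; Read does. EOF repeats forever once reached.
		for peeker.Peek().Type != scanner.EOF {
			tok := peeker.Read()
			fmt.Printf("%3d:%-3d %s\n", tok.Pos.Line, tok.Pos.Column, tok)
		}

		// Close drains the channel so the scanning goroutine can exit.
		peeker.Close()
	}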
- ch <- &Token{ - Type: EOF, - Content: "", - Pos: pos, - } - break All - case EOF: - // All done - // (though a syntax error that we'll catch in the parser) - break All - case END: - nesting-- - if nesting < 0 { - // Can happen if there are unbalanced ${ and } sequences - // in the input, which we'll catch in the parser. - nesting = 0 - } - break Interpolation - case OQUOTE: - // Beginning of nested quoted string - break Interpolation - } - } - } - - close(ch) -} - -// Returns the token found at the start of the given string, followed by -// the number of bytes that were consumed from the string and the adjusted -// source position. -// -// Note that the number of bytes consumed can be more than the length of -// the returned token contents if the string begins with whitespace, since -// it will be silently consumed before reading the token. -func scanInterpolationToken(s string, startPos ast.Pos) (*Token, int, ast.Pos) { - pos := startPos - size := 0 - - // Consume whitespace, if any - for len(s) > 0 && byteIsSpace(s[0]) { - if s[0] == '\n' { - pos.Column = 1 - pos.Line++ - } else { - pos.Column++ - } - size++ - s = s[1:] - } - - // Unexpected EOF during sequence - if len(s) == 0 { - return &Token{ - Type: EOF, - Content: "", - Pos: pos, - }, size, pos - } - - next := s[0] - var token *Token - - switch next { - case '(', ')', '[', ']', ',', '.', '+', '-', '*', '/', '%', '?', ':': - // Easy punctuation symbols that don't have any special meaning - // during scanning, and that stand for themselves in the - // TokenType enumeration. - token = &Token{ - Type: TokenType(next), - Content: s[:1], - Pos: pos, - } - case '}': - token = &Token{ - Type: END, - Content: s[:1], - Pos: pos, - } - case '"': - token = &Token{ - Type: OQUOTE, - Content: s[:1], - Pos: pos, - } - case '!': - if len(s) >= 2 && s[:2] == "!=" { - token = &Token{ - Type: NOTEQUAL, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: BANG, - Content: s[:1], - Pos: pos, - } - } - case '<': - if len(s) >= 2 && s[:2] == "<=" { - token = &Token{ - Type: LTE, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: LT, - Content: s[:1], - Pos: pos, - } - } - case '>': - if len(s) >= 2 && s[:2] == ">=" { - token = &Token{ - Type: GTE, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: GT, - Content: s[:1], - Pos: pos, - } - } - case '=': - if len(s) >= 2 && s[:2] == "==" { - token = &Token{ - Type: EQUAL, - Content: s[:2], - Pos: pos, - } - } else { - // A single equals is not a valid operator - token = &Token{ - Type: INVALID, - Content: s[:1], - Pos: pos, - } - } - case '&': - if len(s) >= 2 && s[:2] == "&&" { - token = &Token{ - Type: AND, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: INVALID, - Content: s[:1], - Pos: pos, - } - } - case '|': - if len(s) >= 2 && s[:2] == "||" { - token = &Token{ - Type: OR, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: INVALID, - Content: s[:1], - Pos: pos, - } - } - default: - if next >= '0' && next <= '9' { - num, numType := scanNumber(s) - token = &Token{ - Type: numType, - Content: num, - Pos: pos, - } - } else if stringStartsWithIdentifier(s) { - ident, runeLen := scanIdentifier(s) - tokenType := IDENTIFIER - if ident == "true" || ident == "false" { - tokenType = BOOL - } - token = &Token{ - Type: tokenType, - Content: ident, - Pos: pos, - } - // Skip usual token handling because it doesn't - // know how to deal with UTF-8 sequences. 
- pos.Column = pos.Column + runeLen - return token, size + len(ident), pos - } else { - _, byteLen := utf8.DecodeRuneInString(s) - token = &Token{ - Type: INVALID, - Content: s[:byteLen], - Pos: pos, - } - // Skip usual token handling because it doesn't - // know how to deal with UTF-8 sequences. - pos.Column = pos.Column + 1 - return token, size + byteLen, pos - } - } - - // Here we assume that the token content contains no UTF-8 sequences, - // because we dealt with UTF-8 characters as a special case where - // necessary above. - size = size + len(token.Content) - pos.Column = pos.Column + len(token.Content) - - return token, size, pos -} - -// Returns the (possibly-empty) prefix of the given string that represents -// a literal, followed by the token that marks the end of the literal. -func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) { - litLen := 0 - pos := startPos - var terminator *Token - for { - - if litLen >= len(s) { - if nested { - // We've ended in the middle of a quoted string, - // which means this token is actually invalid. - return "", &Token{ - Type: INVALID, - Content: s, - Pos: startPos, - } - } - terminator = &Token{ - Type: EOF, - Content: "", - Pos: pos, - } - break - } - - next := s[litLen] - - if next == '$' && len(s) > litLen+1 { - follow := s[litLen+1] - - if follow == '{' { - terminator = &Token{ - Type: BEGIN, - Content: s[litLen : litLen+2], - Pos: pos, - } - pos.Column = pos.Column + 2 - break - } else if follow == '$' { - // Double-$ escapes the special processing of $, - // so we will consume both characters here. - pos.Column = pos.Column + 2 - litLen = litLen + 2 - continue - } - } - - // special handling that applies only to quoted strings - if nested { - if next == '"' { - terminator = &Token{ - Type: CQUOTE, - Content: s[litLen : litLen+1], - Pos: pos, - } - pos.Column = pos.Column + 1 - break - } - - // Escaped quote marks do not terminate the string. - // - // All we do here in the scanner is avoid terminating a string - // due to an escaped quote. The parser is responsible for the - // full handling of escape sequences, since it's able to produce - // better error messages than we can produce in here. - if next == '\\' && len(s) > litLen+1 { - follow := s[litLen+1] - - if follow == '"' { - // \" escapes the special processing of ", - // so we will consume both characters here. - pos.Column = pos.Column + 2 - litLen = litLen + 2 - continue - } else if follow == '\\' { - // \\ escapes \ - // so we will consume both characters here. - pos.Column = pos.Column + 2 - litLen = litLen + 2 - continue - } - } - } - - if next == '\n' { - pos.Column = 1 - pos.Line++ - litLen++ - } else { - pos.Column++ - - // "Column" measures runes, so we need to actually consume - // a valid UTF-8 character here. - _, size := utf8.DecodeRuneInString(s[litLen:]) - litLen = litLen + size - } - - } - - return s[:litLen], terminator -} - -// scanNumber returns the extent of the prefix of the string that represents -// a valid number, along with what type of number it represents: INT or FLOAT. -// -// scanNumber does only basic character analysis: numbers consist of digits -// and periods, with at least one period signalling a FLOAT. It's the parser's -// responsibility to validate the form and range of the number, such as ensuring -// that a FLOAT actually contains only one period, etc. 
-func scanNumber(s string) (string, TokenType) { - period := -1 - byteLen := 0 - numType := INTEGER - for { - if byteLen >= len(s) { - break - } - - next := s[byteLen] - if next != '.' && (next < '0' || next > '9') { - // If our last value was a period, then we're not a float, - // we're just an integer that ends in a period. - if period == byteLen-1 { - byteLen-- - numType = INTEGER - } - - break - } - - if next == '.' { - // If we've already seen a period, break out - if period >= 0 { - break - } - - period = byteLen - numType = FLOAT - } - - byteLen++ - } - - return s[:byteLen], numType -} - -// scanIdentifier returns the extent of the prefix of the string that -// represents a valid identifier, along with the length of that prefix -// in runes. -// -// Identifiers may contain utf8-encoded non-Latin letters, which will -// cause the returned "rune length" to be shorter than the byte length -// of the returned string. -func scanIdentifier(s string) (string, int) { - byteLen := 0 - runeLen := 0 - for { - if byteLen >= len(s) { - break - } - - nextRune, size := utf8.DecodeRuneInString(s[byteLen:]) - if !(nextRune == '_' || - nextRune == '-' || - nextRune == '.' || - nextRune == '*' || - unicode.IsNumber(nextRune) || - unicode.IsLetter(nextRune) || - unicode.IsMark(nextRune)) { - break - } - - // If we reach a star, it must be between periods to be part - // of the same identifier. - if nextRune == '*' && s[byteLen-1] != '.' { - break - } - - // If our previous character was a star, then the current must - // be period. Otherwise, undo that and exit. - if byteLen > 0 && s[byteLen-1] == '*' && nextRune != '.' { - byteLen-- - if s[byteLen-1] == '.' { - byteLen-- - } - - break - } - - byteLen = byteLen + size - runeLen = runeLen + 1 - } - - return s[:byteLen], runeLen -} - -// byteIsSpace implements a restrictive interpretation of spaces that includes -// only what's valid inside interpolation sequences: spaces, tabs, newlines. -func byteIsSpace(b byte) bool { - switch b { - case ' ', '\t', '\r', '\n': - return true - default: - return false - } -} - -// stringStartsWithIdentifier returns true if the given string begins with -// a character that is a legal start of an identifier: an underscore or -// any character that Unicode considers to be a letter. -func stringStartsWithIdentifier(s string) bool { - if len(s) == 0 { - return false - } - - first := s[0] - - // Easy ASCII cases first - if (first >= 'a' && first <= 'z') || (first >= 'A' && first <= 'Z') || first == '_' { - return true - } - - // If our first byte begins a UTF-8 sequence then the sequence might - // be a unicode letter. - if utf8.RuneStart(first) { - firstRune, _ := utf8.DecodeRuneInString(s) - if unicode.IsLetter(firstRune) { - return true - } - } - - return false -} diff --git a/vendor/github.com/hashicorp/hil/scanner/token.go b/vendor/github.com/hashicorp/hil/scanner/token.go deleted file mode 100644 index b6c82ae9..00000000 --- a/vendor/github.com/hashicorp/hil/scanner/token.go +++ /dev/null @@ -1,105 +0,0 @@ -package scanner - -import ( - "fmt" - - "github.com/hashicorp/hil/ast" -) - -type Token struct { - Type TokenType - Content string - Pos ast.Pos -} - -//go:generate stringer -type=TokenType -type TokenType rune - -const ( - // Raw string data outside of ${ .. } sequences - LITERAL TokenType = 'o' - - // STRING is like a LITERAL but it's inside a quoted string - // within a ${ ... } sequence, and so it can contain backslash - // escaping. 
- STRING TokenType = 'S' - - // Other Literals - INTEGER TokenType = 'I' - FLOAT TokenType = 'F' - BOOL TokenType = 'B' - - BEGIN TokenType = '$' // actually "${" - END TokenType = '}' - OQUOTE TokenType = '“' // Opening quote of a nested quoted sequence - CQUOTE TokenType = '”' // Closing quote of a nested quoted sequence - OPAREN TokenType = '(' - CPAREN TokenType = ')' - OBRACKET TokenType = '[' - CBRACKET TokenType = ']' - COMMA TokenType = ',' - - IDENTIFIER TokenType = 'i' - - PERIOD TokenType = '.' - PLUS TokenType = '+' - MINUS TokenType = '-' - STAR TokenType = '*' - SLASH TokenType = '/' - PERCENT TokenType = '%' - - AND TokenType = '∧' - OR TokenType = '∨' - BANG TokenType = '!' - - EQUAL TokenType = '=' - NOTEQUAL TokenType = '≠' - GT TokenType = '>' - LT TokenType = '<' - GTE TokenType = '≥' - LTE TokenType = '≤' - - QUESTION TokenType = '?' - COLON TokenType = ':' - - EOF TokenType = '␄' - - // Produced for sequences that cannot be understood as valid tokens - // e.g. due to use of unrecognized punctuation. - INVALID TokenType = '�' -) - -func (t *Token) String() string { - switch t.Type { - case EOF: - return "end of string" - case INVALID: - return fmt.Sprintf("invalid sequence %q", t.Content) - case INTEGER: - return fmt.Sprintf("integer %s", t.Content) - case FLOAT: - return fmt.Sprintf("float %s", t.Content) - case STRING: - return fmt.Sprintf("string %q", t.Content) - case LITERAL: - return fmt.Sprintf("literal %q", t.Content) - case OQUOTE: - return fmt.Sprintf("opening quote") - case CQUOTE: - return fmt.Sprintf("closing quote") - case AND: - return "&&" - case OR: - return "||" - case NOTEQUAL: - return "!=" - case GTE: - return ">=" - case LTE: - return "<=" - default: - // The remaining token types have content that - // speaks for itself. 
- return fmt.Sprintf("%q", t.Content) - } -} diff --git a/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go b/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go deleted file mode 100644 index a602f5fd..00000000 --- a/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by "stringer -type=TokenType"; DO NOT EDIT - -package scanner - -import "fmt" - -const _TokenType_name = "BANGBEGINPERCENTOPARENCPARENSTARPLUSCOMMAMINUSPERIODSLASHCOLONLTEQUALGTQUESTIONBOOLFLOATINTEGERSTRINGOBRACKETCBRACKETIDENTIFIERLITERALENDOQUOTECQUOTEANDORNOTEQUALLTEGTEEOFINVALID" - -var _TokenType_map = map[TokenType]string{ - 33: _TokenType_name[0:4], - 36: _TokenType_name[4:9], - 37: _TokenType_name[9:16], - 40: _TokenType_name[16:22], - 41: _TokenType_name[22:28], - 42: _TokenType_name[28:32], - 43: _TokenType_name[32:36], - 44: _TokenType_name[36:41], - 45: _TokenType_name[41:46], - 46: _TokenType_name[46:52], - 47: _TokenType_name[52:57], - 58: _TokenType_name[57:62], - 60: _TokenType_name[62:64], - 61: _TokenType_name[64:69], - 62: _TokenType_name[69:71], - 63: _TokenType_name[71:79], - 66: _TokenType_name[79:83], - 70: _TokenType_name[83:88], - 73: _TokenType_name[88:95], - 83: _TokenType_name[95:101], - 91: _TokenType_name[101:109], - 93: _TokenType_name[109:117], - 105: _TokenType_name[117:127], - 111: _TokenType_name[127:134], - 125: _TokenType_name[134:137], - 8220: _TokenType_name[137:143], - 8221: _TokenType_name[143:149], - 8743: _TokenType_name[149:152], - 8744: _TokenType_name[152:154], - 8800: _TokenType_name[154:162], - 8804: _TokenType_name[162:165], - 8805: _TokenType_name[165:168], - 9220: _TokenType_name[168:171], - 65533: _TokenType_name[171:178], -} - -func (i TokenType) String() string { - if str, ok := _TokenType_map[i]; ok { - return str - } - return fmt.Sprintf("TokenType(%d)", i) -} diff --git a/vendor/github.com/hashicorp/hil/transform_fixed.go b/vendor/github.com/hashicorp/hil/transform_fixed.go deleted file mode 100644 index e69df294..00000000 --- a/vendor/github.com/hashicorp/hil/transform_fixed.go +++ /dev/null @@ -1,29 +0,0 @@ -package hil - -import ( - "github.com/hashicorp/hil/ast" -) - -// FixedValueTransform transforms an AST to return a fixed value for -// all interpolations. i.e. you can make "hi ${anything}" always -// turn into "hi foo". -// -// The primary use case for this is for config validations where you can -// verify that interpolations result in a certain type of string. -func FixedValueTransform(root ast.Node, Value *ast.LiteralNode) ast.Node { - // We visit the nodes in top-down order - result := root - switch n := result.(type) { - case *ast.Output: - for i, v := range n.Exprs { - n.Exprs[i] = FixedValueTransform(v, Value) - } - case *ast.LiteralNode: - // We keep it as-is - default: - // Anything else we replace - result = Value - } - - return result -} diff --git a/vendor/github.com/hashicorp/hil/walk.go b/vendor/github.com/hashicorp/hil/walk.go deleted file mode 100644 index 0ace8306..00000000 --- a/vendor/github.com/hashicorp/hil/walk.go +++ /dev/null @@ -1,266 +0,0 @@ -package hil - -import ( - "fmt" - "reflect" - "strings" - - "github.com/hashicorp/hil/ast" - "github.com/mitchellh/reflectwalk" -) - -// WalkFn is the type of function to pass to Walk. Modify fields within -// WalkData to control whether replacement happens. -type WalkFn func(*WalkData) error - -// WalkData is the structure passed to the callback of the Walk function. 
-//
-// This structure contains data passed in as well as fields that are expected
-// to be written by the caller as a result. Please see the documentation for
-// each field for more information.
-type WalkData struct {
-	// Root is the parsed root of this HIL program
-	Root ast.Node
-
-	// Location is the location within the structure where this
-	// value was found. This can be used to modify behavior within
-	// slices and so on.
-	Location reflectwalk.Location
-
-	// The below two values must be set by the callback to have any effect.
-	//
-	// Replace, if true, will replace the value in the structure with
-	// ReplaceValue. It is up to the caller to make sure this is a string.
-	Replace      bool
-	ReplaceValue string
-}
-
-// Walk will walk an arbitrary Go structure and parse any string as an
-// HIL program and call the callback cb to determine what to replace it
-// with.
-//
-// This function is very useful for arbitrary HIL program interpolation
-// across a complex configuration structure. Due to the heavy use of
-// reflection in this function, it is recommended to write many unit tests
-// with your typical configuration structures to help mitigate the risk
-// of panics.
-func Walk(v interface{}, cb WalkFn) error {
-	walker := &interpolationWalker{F: cb}
-	return reflectwalk.Walk(v, walker)
-}
-
-// interpolationWalker implements interfaces for the reflectwalk package
-// (github.com/mitchellh/reflectwalk) that can be used to automatically
-// execute a callback for an interpolation.
-type interpolationWalker struct {
-	F WalkFn
-
-	key         []string
-	lastValue   reflect.Value
-	loc         reflectwalk.Location
-	cs          []reflect.Value
-	csKey       []reflect.Value
-	csData      interface{}
-	sliceIndex  int
-	unknownKeys []string
-}
-
-func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
-	w.loc = loc
-	return nil
-}
-
-func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
-	w.loc = reflectwalk.None
-
-	switch loc {
-	case reflectwalk.Map:
-		w.cs = w.cs[:len(w.cs)-1]
-	case reflectwalk.MapValue:
-		w.key = w.key[:len(w.key)-1]
-		w.csKey = w.csKey[:len(w.csKey)-1]
-	case reflectwalk.Slice:
-		// Split any values that need to be split
-		w.splitSlice()
-		w.cs = w.cs[:len(w.cs)-1]
-	case reflectwalk.SliceElem:
-		w.csKey = w.csKey[:len(w.csKey)-1]
-	}
-
-	return nil
-}
-
-func (w *interpolationWalker) Map(m reflect.Value) error {
-	w.cs = append(w.cs, m)
-	return nil
-}
-
-func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
-	w.csData = k
-	w.csKey = append(w.csKey, k)
-	w.key = append(w.key, k.String())
-	w.lastValue = v
-	return nil
-}
-
-func (w *interpolationWalker) Slice(s reflect.Value) error {
-	w.cs = append(w.cs, s)
-	return nil
-}
-
-func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error {
-	w.csKey = append(w.csKey, reflect.ValueOf(i))
-	w.sliceIndex = i
-	return nil
-}
-
-func (w *interpolationWalker) Primitive(v reflect.Value) error {
-	setV := v
-
-	// We only care about strings
-	if v.Kind() == reflect.Interface {
-		setV = v
-		v = v.Elem()
-	}
-	if v.Kind() != reflect.String {
-		return nil
-	}
-
-	astRoot, err := Parse(v.String())
-	if err != nil {
-		return err
-	}
-
-	// If the AST we got is just a literal string value with the same
-	// value then we ignore it. We have to check if it's the same value
-	// because it is possible to input a string, get out a string, and
-	// have it be different.
For example: "foo-$${bar}" turns into - // "foo-${bar}" - if n, ok := astRoot.(*ast.LiteralNode); ok { - if s, ok := n.Value.(string); ok && s == v.String() { - return nil - } - } - - if w.F == nil { - return nil - } - - data := WalkData{Root: astRoot, Location: w.loc} - if err := w.F(&data); err != nil { - return fmt.Errorf( - "%s in:\n\n%s", - err, v.String()) - } - - if data.Replace { - /* - if remove { - w.removeCurrent() - return nil - } - */ - - resultVal := reflect.ValueOf(data.ReplaceValue) - switch w.loc { - case reflectwalk.MapKey: - m := w.cs[len(w.cs)-1] - - // Delete the old value - var zero reflect.Value - m.SetMapIndex(w.csData.(reflect.Value), zero) - - // Set the new key with the existing value - m.SetMapIndex(resultVal, w.lastValue) - - // Set the key to be the new key - w.csData = resultVal - case reflectwalk.MapValue: - // If we're in a map, then the only way to set a map value is - // to set it directly. - m := w.cs[len(w.cs)-1] - mk := w.csData.(reflect.Value) - m.SetMapIndex(mk, resultVal) - default: - // Otherwise, we should be addressable - setV.Set(resultVal) - } - } - - return nil -} - -func (w *interpolationWalker) removeCurrent() { - // Append the key to the unknown keys - w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, ".")) - - for i := 1; i <= len(w.cs); i++ { - c := w.cs[len(w.cs)-i] - switch c.Kind() { - case reflect.Map: - // Zero value so that we delete the map key - var val reflect.Value - - // Get the key and delete it - k := w.csData.(reflect.Value) - c.SetMapIndex(k, val) - return - } - } - - panic("No container found for removeCurrent") -} - -func (w *interpolationWalker) replaceCurrent(v reflect.Value) { - c := w.cs[len(w.cs)-2] - switch c.Kind() { - case reflect.Map: - // Get the key and delete it - k := w.csKey[len(w.csKey)-1] - c.SetMapIndex(k, v) - } -} - -func (w *interpolationWalker) splitSlice() { - // Get the []interface{} slice so we can do some operations on - // it without dealing with reflection. We'll document each step - // here to be clear. - var s []interface{} - raw := w.cs[len(w.cs)-1] - switch v := raw.Interface().(type) { - case []interface{}: - s = v - case []map[string]interface{}: - return - default: - panic("Unknown kind: " + raw.Kind().String()) - } - - // Check if we have any elements that we need to split. If not, then - // just return since we're done. - split := false - if !split { - return - } - - // Make a new result slice that is twice the capacity to fit our growth. - result := make([]interface{}, 0, len(s)*2) - - // Go over each element of the original slice and start building up - // the resulting slice by splitting where we have to. - for _, v := range s { - sv, ok := v.(string) - if !ok { - // Not a string, so just set it - result = append(result, v) - continue - } - - // Not a string list, so just set it - result = append(result, sv) - } - - // Our slice is now done, we have to replace the slice now - // with this new one that we have. - w.replaceCurrent(reflect.ValueOf(result)) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go b/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go deleted file mode 100644 index 0dae567d..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go +++ /dev/null @@ -1,61 +0,0 @@ -package auth - -import ( - "github.com/hashicorp/terraform-svchost" -) - -// CachingCredentialsSource creates a new credentials source that wraps another -// and caches its results in memory, on a per-hostname basis. 
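Stepping back to walk.go above: a sketch of Walk replacing every interpolated string in a nested structure. The callback logic is illustrative only, and the behaviour for plain literals follows the Primitive short-circuit shown earlier:

	package main

	import (
		"fmt"

		"github.com/hashicorp/hil"
	)

	func main() {
		config := map[string]interface{}{
			"greeting": "hello ${name}",
			"static":   "no interpolation here",
		}

		err := hil.Walk(config, func(d *hil.WalkData) error {
			// d.Root is the parsed HIL program for one string value.
			// Setting Replace swaps the original string for ReplaceValue.
			d.Replace = true
			d.ReplaceValue = "<computed>"
			return nil
		})
		if err != nil {
			panic(err)
		}

		fmt.Println(config["greeting"]) // <computed>
		fmt.Println(config["static"])   // unchanged: plain literals never reach the callback
	}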
-// -// No means is provided for expiration of cached credentials, so a caching -// credentials source should have a limited lifetime (one Terraform operation, -// for example) to ensure that time-limited credentials don't expire before -// their cache entries do. -func CachingCredentialsSource(source CredentialsSource) CredentialsSource { - return &cachingCredentialsSource{ - source: source, - cache: map[svchost.Hostname]HostCredentials{}, - } -} - -type cachingCredentialsSource struct { - source CredentialsSource - cache map[svchost.Hostname]HostCredentials -} - -// ForHost passes the given hostname on to the wrapped credentials source and -// caches the result to return for future requests with the same hostname. -// -// Both credentials and non-credentials (nil) responses are cached. -// -// No cache entry is created if the wrapped source returns an error, to allow -// the caller to retry the failing operation. -func (s *cachingCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { - if cache, cached := s.cache[host]; cached { - return cache, nil - } - - result, err := s.source.ForHost(host) - if err != nil { - return result, err - } - - s.cache[host] = result - return result, nil -} - -func (s *cachingCredentialsSource) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { - // We'll delete the cache entry even if the store fails, since that just - // means that the next read will go to the real store and get a chance to - // see which object (old or new) is actually present. - delete(s.cache, host) - return s.source.StoreForHost(host, credentials) -} - -func (s *cachingCredentialsSource) ForgetForHost(host svchost.Hostname) error { - // We'll delete the cache entry even if the store fails, since that just - // means that the next read will go to the real store and get a chance to - // see if the object is still present. - delete(s.cache, host) - return s.source.ForgetForHost(host) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go b/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go deleted file mode 100644 index 36441cd1..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go +++ /dev/null @@ -1,118 +0,0 @@ -// Package auth contains types and functions to manage authentication -// credentials for service hosts. -package auth - -import ( - "fmt" - "net/http" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-svchost" -) - -// Credentials is a list of CredentialsSource objects that can be tried in -// turn until one returns credentials for a host, or one returns an error. -// -// A Credentials is itself a CredentialsSource, wrapping its members. -// In principle one CredentialsSource can be nested inside another, though -// there is no good reason to do so. -// -// The write operations on a Credentials are tried only on the first object, -// under the assumption that it is the primary store. -type Credentials []CredentialsSource - -// NoCredentials is an empty CredentialsSource that always returns nil -// when asked for credentials. -var NoCredentials CredentialsSource = Credentials{} - -// A CredentialsSource is an object that may be able to provide credentials -// for a given host. -// -// Credentials lookups are not guaranteed to be concurrency-safe. Callers -// using these facilities in concurrent code must use external concurrency -// primitives to prevent race conditions. 
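The interface these sources implement is declared next. As a sketch of the contract only (nothing this package ships), a hypothetical read-only source that never finds credentials could look like this, assuming the vendored import paths shown in this patch:

package example

import (
	"fmt"

	svchost "github.com/hashicorp/terraform-svchost"
	"github.com/hashicorp/terraform-svchost/auth"
)

// nilSource is a hypothetical CredentialsSource: it reports "no credentials"
// for every host and refuses writes, mirroring the contract documented below.
type nilSource struct{}

func (nilSource) ForHost(host svchost.Hostname) (auth.HostCredentials, error) {
	return nil, nil // nil, nil means "no credentials here", not an error
}

func (nilSource) StoreForHost(host svchost.Hostname, credentials auth.HostCredentialsWritable) error {
	return fmt.Errorf("read-only credentials source")
}

func (nilSource) ForgetForHost(host svchost.Hostname) error {
	return nil // nothing is ever stored, so forgetting always succeeds
}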
-type CredentialsSource interface {
-	// ForHost returns a non-nil HostCredentials if the source has credentials
-	// available for the host, and a nil HostCredentials if it does not.
-	//
-	// If an error is returned, progress through a list of CredentialsSources
-	// is halted and the error is returned to the user.
-	ForHost(host svchost.Hostname) (HostCredentials, error)
-
-	// StoreForHost takes a HostCredentialsWritable and saves it as the
-	// credentials for the given host.
-	//
-	// If credentials are already stored for the given host, it will try to
-	// replace those credentials but may produce an error if such replacement
-	// is not possible.
-	StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error
-
-	// ForgetForHost discards any stored credentials for the given host. It
-	// does nothing and returns successfully if no credentials are saved
-	// for that host.
-	ForgetForHost(host svchost.Hostname) error
-}
-
-// HostCredentials represents a single set of credentials for a particular
-// host.
-type HostCredentials interface {
-	// PrepareRequest modifies the given request in-place to apply the
-	// receiving credentials. The usual behavior of this method is to
-	// add some sort of Authorization header to the request.
-	PrepareRequest(req *http.Request)
-
-	// Token returns the authentication token.
-	Token() string
-}
-
-// HostCredentialsWritable is an extension of HostCredentials for credentials
-// objects that can be serialized as a JSON-compatible object value for
-// storage.
-type HostCredentialsWritable interface {
-	HostCredentials
-
-	// ToStore returns a cty.Value, always of an object type,
-	// representing data that can be serialized to represent this object
-	// in persistent storage.
-	//
-	// The resulting value may use only cty values that can be accepted
-	// by the cty JSON encoder, though the caller may elect to instead store
-	// it in some other format that has a JSON-compatible type system.
-	ToStore() cty.Value
-}
-
-// ForHost iterates over the contained CredentialsSource objects and
-// tries to obtain credentials for the given host from each one in turn.
-//
-// If any source returns either a non-nil HostCredentials or a non-nil error
-// then this result is returned. Otherwise, the result is nil, nil.
-func (c Credentials) ForHost(host svchost.Hostname) (HostCredentials, error) {
-	for _, source := range c {
-		creds, err := source.ForHost(host)
-		if creds != nil || err != nil {
-			return creds, err
-		}
-	}
-	return nil, nil
-}
-
-// StoreForHost passes the given arguments to the same operation on the
-// first CredentialsSource in the receiver.
-func (c Credentials) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error {
-	if len(c) == 0 {
-		return fmt.Errorf("no credentials store is available")
-	}
-
-	return c[0].StoreForHost(host, credentials)
-}
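With this file's API surface now fully visible in the hunk above, a usage sketch may help reviewers judge the removal. It chains a static source with a hypothetical helper-program source (helper path is illustrative), wraps the chain in the cache from cache.go, and applies the result to a request; svchost.ForComparison is the hostname normalization function referenced later in this patch:

package main

import (
	"fmt"
	"net/http"

	svchost "github.com/hashicorp/terraform-svchost"
	"github.com/hashicorp/terraform-svchost/auth"
)

func main() {
	static := auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{
		svchost.Hostname("app.terraform.io"): {"token": "example-token"},
	})
	helper := auth.HelperProgramCredentialsSource("/usr/local/bin/tf-credhelper") // hypothetical path

	// The chain tries each source in turn; writes would go to static only.
	// The caching wrapper then answers repeated lookups for a hostname from
	// memory for this source's lifetime (e.g. one Terraform operation).
	src := auth.CachingCredentialsSource(auth.Credentials{static, helper})

	host, err := svchost.ForComparison("app.terraform.io")
	if err != nil {
		panic(err)
	}
	creds, err := src.ForHost(host)
	if err != nil {
		panic(err)
	}
	if creds != nil {
		req, _ := http.NewRequest("GET", "https://app.terraform.io/", nil)
		creds.PrepareRequest(req) // sets "Authorization: Bearer example-token"
		fmt.Println(req.Header.Get("Authorization"))
	}
}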
-
-// ForgetForHost passes the given arguments to the same operation on the
-// first CredentialsSource in the receiver.
-func (c Credentials) ForgetForHost(host svchost.Hostname) error {
-	if len(c) == 0 {
-		return fmt.Errorf("no credentials store is available")
-	}
-
-	return c[0].ForgetForHost(host)
-}
diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go b/vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go
deleted file mode 100644
index 7198c674..00000000
--- a/vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package auth
-
-import (
-	"github.com/zclconf/go-cty/cty"
-)
-
-// HostCredentialsFromMap converts a map of key-value pairs from a credentials
-// definition provided by the user (e.g. in a config file, or via a credentials
-// helper) into a HostCredentials object if possible, or returns nil if
-// no credentials could be extracted from the map.
-//
-// This function ignores map keys it is unfamiliar with, to allow for future
-// expansion of the credentials map format for new credential types.
-func HostCredentialsFromMap(m map[string]interface{}) HostCredentials {
-	if m == nil {
-		return nil
-	}
-	if token, ok := m["token"].(string); ok {
-		return HostCredentialsToken(token)
-	}
-	return nil
-}
-
-// HostCredentialsFromObject converts a cty.Value of an object type into a
-// HostCredentials object if possible, or returns nil if no credentials could
-// be extracted from the object.
-//
-// This function ignores object attributes it is unfamiliar with, to allow for
-// future expansion of the credentials object structure for new credential types.
-//
-// If the given value is not of an object type, this function will panic.
-func HostCredentialsFromObject(obj cty.Value) HostCredentials {
-	if !obj.Type().HasAttribute("token") {
-		return nil
-	}
-
-	tokenV := obj.GetAttr("token")
-	if tokenV.IsNull() || !tokenV.IsKnown() {
-		return nil
-	}
-	if !cty.String.Equals(tokenV.Type()) {
-		// Weird, but maybe some future Terraform version accepts an object
-		// here for some reason, so we'll be resilient.
-		return nil
-	}
-
-	return HostCredentialsToken(tokenV.AsString())
-}
diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go b/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go
deleted file mode 100644
index 76505f20..00000000
--- a/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package auth
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"os/exec"
-	"path/filepath"
-
-	ctyjson "github.com/zclconf/go-cty/cty/json"
-
-	"github.com/hashicorp/terraform-svchost"
-)
-
-type helperProgramCredentialsSource struct {
-	executable string
-	args       []string
-}
-
-// HelperProgramCredentialsSource returns a CredentialsSource that runs the
-// given program with the given arguments in order to obtain credentials.
-//
-// The given executable path must be an absolute path; it is the caller's
-// responsibility to validate and process a relative path or other input
-// provided by an end-user. If the given path is not absolute, this
-// function will panic.
-//
-// When credentials are requested, the program will be run in a child process
-// with the given arguments along with two additional arguments added to the
-// end of the list: the literal string "get", followed by the requested
-// hostname in ASCII compatibility form (punycode form).
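Given that protocol description (a verb of "get", "store", or "forget" plus the punycode hostname appended to argv; JSON with a "token" key on stdout; errors on stderr with a nonzero exit), a hypothetical helper program, not part of this repository, could be as small as the following sketch. The "store" and "forget" verbs, sent by the methods below, are accepted and ignored here:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	if len(os.Args) < 3 {
		fmt.Fprintln(os.Stderr, "usage: helper [args...] <get|store|forget> <hostname>")
		os.Exit(1)
	}
	verb := os.Args[len(os.Args)-2]
	host := os.Args[len(os.Args)-1] // punycode form, per the comment above
	_ = host                        // a real helper would key its storage on this

	switch verb {
	case "get":
		// Only the "token" key is consumed (see HostCredentialsFromMap above).
		json.NewEncoder(os.Stdout).Encode(map[string]string{"token": "example-token"})
	case "store":
		// The credentials object arrives as JSON on stdin.
		var creds map[string]interface{}
		json.NewDecoder(os.Stdin).Decode(&creds)
	case "forget":
		// Nothing is persisted in this sketch, so nothing to discard.
	default:
		fmt.Fprintf(os.Stderr, "unknown verb %q\n", verb)
		os.Exit(1)
	}
}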
-func HelperProgramCredentialsSource(executable string, args ...string) CredentialsSource { - if !filepath.IsAbs(executable) { - panic("NewCredentialsSourceHelperProgram requires absolute path to executable") - } - - fullArgs := make([]string, len(args)+1) - fullArgs[0] = executable - copy(fullArgs[1:], args) - - return &helperProgramCredentialsSource{ - executable: executable, - args: fullArgs, - } -} - -func (s *helperProgramCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { - args := make([]string, len(s.args), len(s.args)+2) - copy(args, s.args) - args = append(args, "get") - args = append(args, string(host)) - - outBuf := bytes.Buffer{} - errBuf := bytes.Buffer{} - - cmd := exec.Cmd{ - Path: s.executable, - Args: args, - Stdin: nil, - Stdout: &outBuf, - Stderr: &errBuf, - } - err := cmd.Run() - if _, isExitErr := err.(*exec.ExitError); isExitErr { - errText := errBuf.String() - if errText == "" { - // Shouldn't happen for a well-behaved helper program - return nil, fmt.Errorf("error in %s, but it produced no error message", s.executable) - } - return nil, fmt.Errorf("error in %s: %s", s.executable, errText) - } else if err != nil { - return nil, fmt.Errorf("failed to run %s: %s", s.executable, err) - } - - var m map[string]interface{} - err = json.Unmarshal(outBuf.Bytes(), &m) - if err != nil { - return nil, fmt.Errorf("malformed output from %s: %s", s.executable, err) - } - - return HostCredentialsFromMap(m), nil -} - -func (s *helperProgramCredentialsSource) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { - args := make([]string, len(s.args), len(s.args)+2) - copy(args, s.args) - args = append(args, "store") - args = append(args, string(host)) - - toStore := credentials.ToStore() - toStoreRaw, err := ctyjson.Marshal(toStore, toStore.Type()) - if err != nil { - return fmt.Errorf("can't serialize credentials to store: %s", err) - } - - inReader := bytes.NewReader(toStoreRaw) - errBuf := bytes.Buffer{} - - cmd := exec.Cmd{ - Path: s.executable, - Args: args, - Stdin: inReader, - Stderr: &errBuf, - Stdout: nil, - } - err = cmd.Run() - if _, isExitErr := err.(*exec.ExitError); isExitErr { - errText := errBuf.String() - if errText == "" { - // Shouldn't happen for a well-behaved helper program - return fmt.Errorf("error in %s, but it produced no error message", s.executable) - } - return fmt.Errorf("error in %s: %s", s.executable, errText) - } else if err != nil { - return fmt.Errorf("failed to run %s: %s", s.executable, err) - } - - return nil -} - -func (s *helperProgramCredentialsSource) ForgetForHost(host svchost.Hostname) error { - args := make([]string, len(s.args), len(s.args)+2) - copy(args, s.args) - args = append(args, "forget") - args = append(args, string(host)) - - errBuf := bytes.Buffer{} - - cmd := exec.Cmd{ - Path: s.executable, - Args: args, - Stdin: nil, - Stderr: &errBuf, - Stdout: nil, - } - err := cmd.Run() - if _, isExitErr := err.(*exec.ExitError); isExitErr { - errText := errBuf.String() - if errText == "" { - // Shouldn't happen for a well-behaved helper program - return fmt.Errorf("error in %s, but it produced no error message", s.executable) - } - return fmt.Errorf("error in %s: %s", s.executable, errText) - } else if err != nil { - return fmt.Errorf("failed to run %s: %s", s.executable, err) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/static.go b/vendor/github.com/hashicorp/terraform-svchost/auth/static.go deleted file mode 100644 index f8b0b076..00000000 --- 
a/vendor/github.com/hashicorp/terraform-svchost/auth/static.go +++ /dev/null @@ -1,38 +0,0 @@ -package auth - -import ( - "fmt" - - "github.com/hashicorp/terraform-svchost" -) - -// StaticCredentialsSource is a credentials source that retrieves credentials -// from the provided map. It returns nil if a requested hostname is not -// present in the map. -// -// The caller should not modify the given map after passing it to this function. -func StaticCredentialsSource(creds map[svchost.Hostname]map[string]interface{}) CredentialsSource { - return staticCredentialsSource(creds) -} - -type staticCredentialsSource map[svchost.Hostname]map[string]interface{} - -func (s staticCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { - if s == nil { - return nil, nil - } - - if m, exists := s[host]; exists { - return HostCredentialsFromMap(m), nil - } - - return nil, nil -} - -func (s staticCredentialsSource) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { - return fmt.Errorf("can't store new credentials in a static credentials source") -} - -func (s staticCredentialsSource) ForgetForHost(host svchost.Hostname) error { - return fmt.Errorf("can't discard credentials from a static credentials source") -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go b/vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go deleted file mode 100644 index 1d36553a..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go +++ /dev/null @@ -1,43 +0,0 @@ -package auth - -import ( - "net/http" - - "github.com/zclconf/go-cty/cty" -) - -// HostCredentialsToken is a HostCredentials implementation that represents a -// single "bearer token", to be sent to the server via an Authorization header -// with the auth type set to "Bearer". -// -// To save a token as the credentials for a host, convert the token string to -// this type and use the result as a HostCredentialsWritable implementation. -type HostCredentialsToken string - -// Interface implementation assertions. Compilation will fail here if -// HostCredentialsToken does not fully implement these interfaces. -var _ HostCredentials = HostCredentialsToken("") -var _ HostCredentialsWritable = HostCredentialsToken("") - -// PrepareRequest alters the given HTTP request by setting its Authorization -// header to the string "Bearer " followed by the encapsulated authentication -// token. -func (tc HostCredentialsToken) PrepareRequest(req *http.Request) { - if req.Header == nil { - req.Header = http.Header{} - } - req.Header.Set("Authorization", "Bearer "+string(tc)) -} - -// Token returns the authentication token. -func (tc HostCredentialsToken) Token() string { - return string(tc) -} - -// ToStore returns a credentials object with a single attribute "token" whose -// value is the token string. -func (tc HostCredentialsToken) ToStore() cty.Value { - return cty.ObjectVal(map[string]cty.Value{ - "token": cty.StringVal(string(tc)), - }) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go b/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go deleted file mode 100644 index 97831363..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go +++ /dev/null @@ -1,275 +0,0 @@ -// Package disco handles Terraform's remote service discovery protocol. 
-// -// This protocol allows mapping from a service hostname, as produced by the -// svchost package, to a set of services supported by that host and the -// endpoint information for each supported service. -package disco - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "mime" - "net/http" - "net/url" - "time" - - "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform-svchost/auth" -) - -const ( - // Fixed path to the discovery manifest. - discoPath = "/.well-known/terraform.json" - - // Arbitrary-but-small number to prevent runaway redirect loops. - maxRedirects = 3 - - // Arbitrary-but-small time limit to prevent UI "hangs" during discovery. - discoTimeout = 11 * time.Second - - // 1MB - to prevent abusive services from using loads of our memory. - maxDiscoDocBytes = 1 * 1024 * 1024 -) - -// httpTransport is overridden during tests, to skip TLS verification. -var httpTransport = defaultHttpTransport() - -// Disco is the main type in this package, which allows discovery on given -// hostnames and caches the results by hostname to avoid repeated requests -// for the same information. -type Disco struct { - hostCache map[svchost.Hostname]*Host - credsSrc auth.CredentialsSource - - // Transport is a custom http.RoundTripper to use. - Transport http.RoundTripper -} - -// New returns a new initialized discovery object. -func New() *Disco { - return NewWithCredentialsSource(nil) -} - -// NewWithCredentialsSource returns a new discovery object initialized with -// the given credentials source. -func NewWithCredentialsSource(credsSrc auth.CredentialsSource) *Disco { - return &Disco{ - hostCache: make(map[svchost.Hostname]*Host), - credsSrc: credsSrc, - Transport: httpTransport, - } -} - -func (d *Disco) SetUserAgent(uaString string) { - d.Transport = &userAgentRoundTripper{ - innerRt: d.Transport, - userAgent: uaString, - } -} - -// SetCredentialsSource provides a credentials source that will be used to -// add credentials to outgoing discovery requests, where available. -// -// If this method is never called, no outgoing discovery requests will have -// credentials. -func (d *Disco) SetCredentialsSource(src auth.CredentialsSource) { - d.credsSrc = src -} - -// CredentialsSource returns the credentials source associated with the receiver, -// or an empty credentials source if none is associated. -func (d *Disco) CredentialsSource() auth.CredentialsSource { - if d.credsSrc == nil { - // We'll return an empty one just to save the caller from having to - // protect against the nil case, since this interface already allows - // for the possibility of there being no credentials at all. - return auth.StaticCredentialsSource(nil) - } - return d.credsSrc -} - -// CredentialsForHost returns a non-nil HostCredentials if the embedded source has -// credentials available for the host, and a nil HostCredentials if it does not. -func (d *Disco) CredentialsForHost(hostname svchost.Hostname) (auth.HostCredentials, error) { - if d.credsSrc == nil { - return nil, nil - } - return d.credsSrc.ForHost(hostname) -} - -// ForceHostServices provides a pre-defined set of services for a given -// host, which prevents the receiver from attempting network-based discovery -// for the given host. Instead, the given services map will be returned -// verbatim. 
-// -// When providing "forced" services, any relative URLs are resolved against -// the initial discovery URL that would have been used for network-based -// discovery, yielding the same results as if the given map were published -// at the host's default discovery URL, though using absolute URLs is strongly -// recommended to make the configured behavior more explicit. -func (d *Disco) ForceHostServices(hostname svchost.Hostname, services map[string]interface{}) { - if services == nil { - services = map[string]interface{}{} - } - - d.hostCache[hostname] = &Host{ - discoURL: &url.URL{ - Scheme: "https", - Host: string(hostname), - Path: discoPath, - }, - hostname: hostname.ForDisplay(), - services: services, - transport: d.Transport, - } -} - -// Discover runs the discovery protocol against the given hostname (which must -// already have been validated and prepared with svchost.ForComparison) and -// returns an object describing the services available at that host. -// -// If a given hostname supports no Terraform services at all, a non-nil but -// empty Host object is returned. When giving feedback to the end user about -// such situations, we say "host does not provide a service", -// regardless of whether that is due to that service specifically being absent -// or due to the host not providing Terraform services at all, since we don't -// wish to expose the detail of whole-host discovery to an end-user. -func (d *Disco) Discover(hostname svchost.Hostname) (*Host, error) { - if host, cached := d.hostCache[hostname]; cached { - return host, nil - } - - host, err := d.discover(hostname) - if err != nil { - return nil, err - } - d.hostCache[hostname] = host - - return host, nil -} - -// DiscoverServiceURL is a convenience wrapper for discovery on a given -// hostname and then looking up a particular service in the result. -func (d *Disco) DiscoverServiceURL(hostname svchost.Hostname, serviceID string) (*url.URL, error) { - host, err := d.Discover(hostname) - if err != nil { - return nil, err - } - return host.ServiceURL(serviceID) -} - -// discover implements the actual discovery process, with its result cached -// by the public-facing Discover method. -func (d *Disco) discover(hostname svchost.Hostname) (*Host, error) { - discoURL := &url.URL{ - Scheme: "https", - Host: hostname.String(), - Path: discoPath, - } - - client := &http.Client{ - Transport: d.Transport, - Timeout: discoTimeout, - - CheckRedirect: func(req *http.Request, via []*http.Request) error { - log.Printf("[DEBUG] Service discovery redirected to %s", req.URL) - if len(via) > maxRedirects { - return errors.New("too many redirects") // this error will never actually be seen - } - return nil - }, - } - - req := &http.Request{ - Header: make(http.Header), - Method: "GET", - URL: discoURL, - } - req.Header.Set("Accept", "application/json") - - creds, err := d.CredentialsForHost(hostname) - if err != nil { - log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", hostname, err) - } - if creds != nil { - // Update the request to include credentials. - creds.PrepareRequest(req) - } - - log.Printf("[DEBUG] Service discovery for %s at %s", hostname, discoURL) - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("Failed to request discovery document: %v", err) - } - defer resp.Body.Close() - - host := &Host{ - // Use the discovery URL from resp.Request in - // case the client followed any redirects. 
- discoURL: resp.Request.URL, - hostname: hostname.ForDisplay(), - transport: d.Transport, - } - - // Return the host without any services. - if resp.StatusCode == 404 { - return host, nil - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("Failed to request discovery document: %s", resp.Status) - } - - contentType := resp.Header.Get("Content-Type") - mediaType, _, err := mime.ParseMediaType(contentType) - if err != nil { - return nil, fmt.Errorf("Discovery URL has a malformed Content-Type %q", contentType) - } - if mediaType != "application/json" { - return nil, fmt.Errorf("Discovery URL returned an unsupported Content-Type %q", mediaType) - } - - // This doesn't catch chunked encoding, because ContentLength is -1 in that case. - if resp.ContentLength > maxDiscoDocBytes { - // Size limit here is not a contractual requirement and so we may - // adjust it over time if we find a different limit is warranted. - return nil, fmt.Errorf( - "Discovery doc response is too large (got %d bytes; limit %d)", - resp.ContentLength, maxDiscoDocBytes, - ) - } - - // If the response is using chunked encoding then we can't predict its - // size, but we'll at least prevent reading the entire thing into memory. - lr := io.LimitReader(resp.Body, maxDiscoDocBytes) - - servicesBytes, err := ioutil.ReadAll(lr) - if err != nil { - return nil, fmt.Errorf("Error reading discovery document body: %v", err) - } - - var services map[string]interface{} - err = json.Unmarshal(servicesBytes, &services) - if err != nil { - return nil, fmt.Errorf("Failed to decode discovery document as a JSON object: %v", err) - } - host.services = services - - return host, nil -} - -// Forget invalidates any cached record of the given hostname. If the host -// has no cache entry then this is a no-op. -func (d *Disco) Forget(hostname svchost.Hostname) { - delete(d.hostCache, hostname) -} - -// ForgetAll is like Forget, but for all of the hostnames that have cache entries. -func (d *Disco) ForgetAll() { - d.hostCache = make(map[svchost.Hostname]*Host) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/host.go b/vendor/github.com/hashicorp/terraform-svchost/disco/host.go deleted file mode 100644 index d0ec8ee6..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/host.go +++ /dev/null @@ -1,423 +0,0 @@ -package disco - -import ( - "encoding/json" - "fmt" - "log" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-version" -) - -const versionServiceID = "versions.v1" - -// Host represents a service discovered host. -type Host struct { - discoURL *url.URL - hostname string - services map[string]interface{} - transport http.RoundTripper -} - -// Constraints represents the version constraints of a service. -type Constraints struct { - Service string `json:"service"` - Product string `json:"product"` - Minimum string `json:"minimum"` - Maximum string `json:"maximum"` - Excluding []string `json:"excluding"` -} - -// ErrServiceNotProvided is returned when the service is not provided. -type ErrServiceNotProvided struct { - hostname string - service string -} - -// Error returns a customized error message. -func (e *ErrServiceNotProvided) Error() string { - if e.hostname == "" { - return fmt.Sprintf("host does not provide a %s service", e.service) - } - return fmt.Sprintf("host %s does not provide a %s service", e.hostname, e.service) -} - -// ErrVersionNotSupported is returned when the version is not supported. 
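Both this error and ErrServiceNotProvided above surface through the service lookups defined later in this file. A sketch of how a caller typically exercises the discovery API removed in this hunk; "modules.v1" and the hostname are illustrative values, and svchost.ForComparison is the normalization step the Discover docs above require:

package main

import (
	"fmt"

	svchost "github.com/hashicorp/terraform-svchost"
	"github.com/hashicorp/terraform-svchost/disco"
)

func main() {
	d := disco.New()
	d.SetUserAgent("example-client/0.1")

	host, err := svchost.ForComparison("registry.terraform.io")
	if err != nil {
		panic(err)
	}

	// Discovery results are cached per hostname inside d, so a second call
	// for the same host performs no additional HTTP request.
	u, err := d.DiscoverServiceURL(host, "modules.v1")
	switch err.(type) {
	case nil:
		fmt.Println(u) // always an absolute http(s) URL
	case *disco.ErrServiceNotProvided, *disco.ErrVersionNotSupported:
		fmt.Println("host answered, but lacks this service or version:", err)
	default:
		panic(err) // network- or protocol-level failure
	}
}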
-type ErrVersionNotSupported struct { - hostname string - service string - version string -} - -// Error returns a customized error message. -func (e *ErrVersionNotSupported) Error() string { - if e.hostname == "" { - return fmt.Sprintf("host does not support %s version %s", e.service, e.version) - } - return fmt.Sprintf("host %s does not support %s version %s", e.hostname, e.service, e.version) -} - -// ErrNoVersionConstraints is returned when checkpoint was disabled -// or the endpoint to query for version constraints was unavailable. -type ErrNoVersionConstraints struct { - disabled bool -} - -// Error returns a customized error message. -func (e *ErrNoVersionConstraints) Error() string { - if e.disabled { - return "checkpoint disabled" - } - return "unable to contact versions service" -} - -// ServiceURL returns the URL associated with the given service identifier, -// which should be of the form "servicename.vN". -// -// A non-nil result is always an absolute URL with a scheme of either HTTPS -// or HTTP. -func (h *Host) ServiceURL(id string) (*url.URL, error) { - svc, ver, err := parseServiceID(id) - if err != nil { - return nil, err - } - - // No services supported for an empty Host. - if h == nil || h.services == nil { - return nil, &ErrServiceNotProvided{service: svc} - } - - urlStr, ok := h.services[id].(string) - if !ok { - // See if we have a matching service as that would indicate - // the service is supported, but not the requested version. - for serviceID := range h.services { - if strings.HasPrefix(serviceID, svc+".") { - return nil, &ErrVersionNotSupported{ - hostname: h.hostname, - service: svc, - version: ver.Original(), - } - } - } - - // No discovered services match the requested service. - return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} - } - - u, err := h.parseURL(urlStr) - if err != nil { - return nil, fmt.Errorf("Failed to parse service URL: %v", err) - } - - return u, nil -} - -// ServiceOAuthClient returns the OAuth client configuration associated with the -// given service identifier, which should be of the form "servicename.vN". -// -// This is an alternative to ServiceURL for unusual services that require -// a full OAuth2 client definition rather than just a URL. Use this only -// for services whose specification calls for this sort of definition. -func (h *Host) ServiceOAuthClient(id string) (*OAuthClient, error) { - svc, ver, err := parseServiceID(id) - if err != nil { - return nil, err - } - - // No services supported for an empty Host. - if h == nil || h.services == nil { - return nil, &ErrServiceNotProvided{service: svc} - } - - if _, ok := h.services[id]; !ok { - // See if we have a matching service as that would indicate - // the service is supported, but not the requested version. - for serviceID := range h.services { - if strings.HasPrefix(serviceID, svc+".") { - return nil, &ErrVersionNotSupported{ - hostname: h.hostname, - service: svc, - version: ver.Original(), - } - } - } - - // No discovered services match the requested service. - return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} - } - - var raw map[string]interface{} - switch v := h.services[id].(type) { - case map[string]interface{}: - raw = v // Great! - case []map[string]interface{}: - // An absolutely infuriating legacy HCL ambiguity. - raw = v[0] - default: - // Debug message because raw Go types don't belong in our UI. 
- log.Printf("[DEBUG] The definition for %s has Go type %T", id, h.services[id]) - return nil, fmt.Errorf("Service %s must be declared with an object value in the service discovery document", id) - } - - var grantTypes OAuthGrantTypeSet - if rawGTs, ok := raw["grant_types"]; ok { - if gts, ok := rawGTs.([]interface{}); ok { - var kws []string - for _, gtI := range gts { - gt, ok := gtI.(string) - if !ok { - // We'll ignore this so that we can potentially introduce - // other types into this array later if we need to. - continue - } - kws = append(kws, gt) - } - grantTypes = NewOAuthGrantTypeSet(kws...) - } else { - return nil, fmt.Errorf("Service %s is defined with invalid grant_types property: must be an array of grant type strings", id) - } - } else { - grantTypes = NewOAuthGrantTypeSet("authz_code") - } - - ret := &OAuthClient{ - SupportedGrantTypes: grantTypes, - } - if clientIDStr, ok := raw["client"].(string); ok { - ret.ID = clientIDStr - } else { - return nil, fmt.Errorf("Service %s definition is missing required property \"client\"", id) - } - if urlStr, ok := raw["authz"].(string); ok { - u, err := h.parseURL(urlStr) - if err != nil { - return nil, fmt.Errorf("Failed to parse authorization URL: %v", err) - } - ret.AuthorizationURL = u - } else { - if grantTypes.RequiresAuthorizationEndpoint() { - return nil, fmt.Errorf("Service %s definition is missing required property \"authz\"", id) - } - } - if urlStr, ok := raw["token"].(string); ok { - u, err := h.parseURL(urlStr) - if err != nil { - return nil, fmt.Errorf("Failed to parse token URL: %v", err) - } - ret.TokenURL = u - } else { - if grantTypes.RequiresTokenEndpoint() { - return nil, fmt.Errorf("Service %s definition is missing required property \"token\"", id) - } - } - if portsRaw, ok := raw["ports"].([]interface{}); ok { - if len(portsRaw) != 2 { - return nil, fmt.Errorf("Invalid \"ports\" definition for service %s: must be a two-element array", id) - } - invalidPortsErr := fmt.Errorf("Invalid \"ports\" definition for service %s: both ports must be whole numbers between 1024 and 65535", id) - ports := make([]uint16, 2) - for i := range ports { - switch v := portsRaw[i].(type) { - case float64: - // JSON unmarshaling always produces float64. HCL 2 might, if - // an invalid fractional number were given. - if float64(uint16(v)) != v || v < 1024 { - return nil, invalidPortsErr - } - ports[i] = uint16(v) - case int: - // Legacy HCL produces int. HCL 2 will too, if the given number - // is a whole number. - if v < 1024 || v > 65535 { - return nil, invalidPortsErr - } - ports[i] = uint16(v) - default: - // Debug message because raw Go types don't belong in our UI. - log.Printf("[DEBUG] Port value %d has Go type %T", i, portsRaw[i]) - return nil, invalidPortsErr - } - } - if ports[1] < ports[0] { - return nil, fmt.Errorf("Invalid \"ports\" definition for service %s: minimum port cannot be greater than maximum port", id) - } - ret.MinPort = ports[0] - ret.MaxPort = ports[1] - } else { - // Default is to accept any port in the range, for a client that is - // able to call back to any localhost port. 
-		ret.MinPort = 1024
-		ret.MaxPort = 65535
-	}
-	if scopesRaw, ok := raw["scopes"].([]interface{}); ok {
-		var scopes []string
-		for _, scopeI := range scopesRaw {
-			scope, ok := scopeI.(string)
-			if !ok {
-				return nil, fmt.Errorf("Invalid \"scopes\" for service %s: all scopes must be strings", id)
-			}
-			scopes = append(scopes, scope)
-		}
-		ret.Scopes = scopes
-	}
-
-	return ret, nil
-}
-
-func (h *Host) parseURL(urlStr string) (*url.URL, error) {
-	u, err := url.Parse(urlStr)
-	if err != nil {
-		return nil, err
-	}
-
-	// Make relative URLs absolute using our discovery URL.
-	if !u.IsAbs() {
-		u = h.discoURL.ResolveReference(u)
-	}
-
-	if u.Scheme != "https" && u.Scheme != "http" {
-		return nil, fmt.Errorf("unsupported scheme %s", u.Scheme)
-	}
-	if u.User != nil {
-		return nil, fmt.Errorf("embedded username/password information is not permitted")
-	}
-
-	// Fragment part is irrelevant, since we're not a browser.
-	u.Fragment = ""
-
-	return u, nil
-}
-
-// VersionConstraints returns the constraints for a given service identifier
-// (which should be of the form "servicename.vN") and product.
-//
-// When an exact (service and version) match is found, the constraints for
-// that service are returned.
-//
-// When the requested version is not provided but the service is, we will
-// search for all alternative versions. If multiple alternative versions
-// are found, the constraints of the latest available version are returned.
-//
-// When a service is not provided at all, an error is returned instead.
-//
-// When checkpoint is disabled or when a 404 is returned after making the
-// HTTP call, an ErrNoVersionConstraints error will be returned.
-func (h *Host) VersionConstraints(id, product string) (*Constraints, error) {
-	svc, _, err := parseServiceID(id)
-	if err != nil {
-		return nil, err
-	}
-
-	// Return early if checkpoint is disabled.
-	if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" {
-		return nil, &ErrNoVersionConstraints{disabled: true}
-	}
-
-	// No services supported for an empty Host.
-	if h == nil || h.services == nil {
-		return nil, &ErrServiceNotProvided{service: svc}
-	}
-
-	// Try to get the service URL for the version service and
-	// return early if the service isn't provided by the host.
-	u, err := h.ServiceURL(versionServiceID)
-	if err != nil {
-		return nil, err
-	}
-
-	// Check if we have an exact (service and version) match.
-	if _, ok := h.services[id].(string); !ok {
-		// If we don't have an exact match, we search for all matching
-		// services and then use the service ID of the latest version.
-		var services []string
-		for serviceID := range h.services {
-			if strings.HasPrefix(serviceID, svc+".") {
-				services = append(services, serviceID)
-			}
-		}
-
-		if len(services) == 0 {
-			// No discovered services match the requested service.
-			return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc}
-		}
-
-		// Set id to the latest service ID we found.
-		var latest *version.Version
-		for _, serviceID := range services {
-			if _, ver, err := parseServiceID(serviceID); err == nil {
-				if latest == nil || latest.LessThan(ver) {
-					id = serviceID
-					latest = ver
-				}
-			}
-		}
-	}
-
-	// Set a default timeout of 1 second for the versions request; the
-	// CHECKPOINT_TIMEOUT environment variable overrides it, in milliseconds.
-	timeout := 1000
-	if v, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil {
-		timeout = v
-	}
-
-	client := &http.Client{
-		Transport: h.transport,
-		Timeout:   time.Duration(timeout) * time.Millisecond,
-	}
-
-	// Prepare the service URL by setting the service and product.
- v := u.Query() - v.Set("product", product) - u.Path += id - u.RawQuery = v.Encode() - - // Create a new request. - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return nil, fmt.Errorf("Failed to create version constraints request: %v", err) - } - req.Header.Set("Accept", "application/json") - - log.Printf("[DEBUG] Retrieve version constraints for service %s and product %s", id, product) - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("Failed to request version constraints: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode == 404 { - return nil, &ErrNoVersionConstraints{disabled: false} - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("Failed to request version constraints: %s", resp.Status) - } - - // Parse the constraints from the response body. - result := &Constraints{} - if err := json.NewDecoder(resp.Body).Decode(result); err != nil { - return nil, fmt.Errorf("Error parsing version constraints: %v", err) - } - - return result, nil -} - -func parseServiceID(id string) (string, *version.Version, error) { - parts := strings.SplitN(id, ".", 2) - if len(parts) != 2 { - return "", nil, fmt.Errorf("Invalid service ID format (i.e. service.vN): %s", id) - } - - version, err := version.NewVersion(parts[1]) - if err != nil { - return "", nil, fmt.Errorf("Invalid service version: %v", err) - } - - return parts[0], version, nil -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go b/vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go deleted file mode 100644 index 7e4a3856..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go +++ /dev/null @@ -1,30 +0,0 @@ -package disco - -import ( - "net/http" - - "github.com/hashicorp/go-cleanhttp" -) - -const DefaultUserAgent = "terraform-svchost/1.0" - -func defaultHttpTransport() http.RoundTripper { - t := cleanhttp.DefaultPooledTransport() - return &userAgentRoundTripper{ - innerRt: t, - userAgent: DefaultUserAgent, - } -} - -type userAgentRoundTripper struct { - innerRt http.RoundTripper - userAgent string -} - -func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if _, ok := req.Header["User-Agent"]; !ok { - req.Header.Set("User-Agent", rt.userAgent) - } - - return rt.innerRt.RoundTrip(req) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go b/vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go deleted file mode 100644 index 9df16ebc..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go +++ /dev/null @@ -1,183 +0,0 @@ -package disco - -import ( - "fmt" - "net/url" - "strings" - - "golang.org/x/oauth2" -) - -// OAuthClient represents an OAuth client configuration, which is used for -// unusual services that require an entire OAuth client configuration as part -// of their service discovery, rather than just a URL. -type OAuthClient struct { - // ID is the identifier for the client, to be used as "client_id" in - // OAuth requests. - ID string - - // Authorization URL is the URL of the authorization endpoint that must - // be used for this OAuth client, as defined in the OAuth2 specifications. - // - // Not all grant types use the authorization endpoint, so it may be omitted - // if none of the grant types in SupportedGrantTypes require it. 
- AuthorizationURL *url.URL - - // Token URL is the URL of the token endpoint that must be used for this - // OAuth client, as defined in the OAuth2 specifications. - // - // Not all grant types use the token endpoint, so it may be omitted - // if none of the grant types in SupportedGrantTypes require it. - TokenURL *url.URL - - // MinPort and MaxPort define a range of TCP ports on localhost that this - // client is able to use as redirect_uri in an authorization request. - // Terraform will select a port from this range for the temporary HTTP - // server it creates to receive the authorization response, giving - // a URL like http://localhost:NNN/ where NNN is the selected port number. - // - // Terraform will reject any port numbers in this range less than 1024, - // to respect the common convention (enforced on some operating systems) - // that lower port numbers are reserved for "privileged" services. - MinPort, MaxPort uint16 - - // SupportedGrantTypes is a set of the grant types that the client may - // choose from. This includes an entry for each distinct type advertised - // by the server, even if a particular keyword is not supported by the - // current version of Terraform. - SupportedGrantTypes OAuthGrantTypeSet - - // Oauth2 does not require scopes for the authorization endpoint, however - // OIDC does. Optional list of scopes to include in auth code and token - // requests. - Scopes []string -} - -// Endpoint returns an oauth2.Endpoint value ready to be used with the oauth2 -// library, representing the URLs from the receiver. -func (c *OAuthClient) Endpoint() oauth2.Endpoint { - ep := oauth2.Endpoint{ - // We don't actually auth because we're not a server-based OAuth client, - // so this instead just means that we include client_id as an argument - // in our requests. - AuthStyle: oauth2.AuthStyleInParams, - } - - if c.AuthorizationURL != nil { - ep.AuthURL = c.AuthorizationURL.String() - } - if c.TokenURL != nil { - ep.TokenURL = c.TokenURL.String() - } - - return ep -} - -// OAuthGrantType is an enumeration of grant type strings that a host can -// advertise support for. -// -// Values of this type don't necessarily match with a known constant of the -// type, because they may represent grant type keywords defined in a later -// version of Terraform which this version doesn't yet know about. -type OAuthGrantType string - -const ( - // OAuthAuthzCodeGrant represents an authorization code grant, as - // defined in IETF RFC 6749 section 4.1. - OAuthAuthzCodeGrant = OAuthGrantType("authz_code") - - // OAuthOwnerPasswordGrant represents a resource owner password - // credentials grant, as defined in IETF RFC 6749 section 4.3. - OAuthOwnerPasswordGrant = OAuthGrantType("password") -) - -// UsesAuthorizationEndpoint returns true if the receiving grant type makes -// use of the authorization endpoint from the client configuration, and thus -// if the authorization endpoint ought to be required. -func (t OAuthGrantType) UsesAuthorizationEndpoint() bool { - switch t { - case OAuthAuthzCodeGrant: - return true - case OAuthOwnerPasswordGrant: - return false - default: - // We'll default to false so that we don't impose any requirements - // on any grant type keywords that might be defined for future - // versions of Terraform. - return false - } -} - -// UsesTokenEndpoint returns true if the receiving grant type makes -// use of the token endpoint from the client configuration, and thus -// if the authorization endpoint ought to be required. 
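To make the endpoint rules concrete, a sketch using the set helpers defined below, with grant keywords as a discovery document would advertise them:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-svchost/disco"
)

func main() {
	gts := disco.NewOAuthGrantTypeSet("authz_code", "password")

	fmt.Println(gts.Has(disco.OAuthAuthzCodeGrant))  // true
	fmt.Println(gts.RequiresAuthorizationEndpoint()) // true: authz_code uses one
	fmt.Println(gts.RequiresTokenEndpoint())         // true: both grant types use one
}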
-func (t OAuthGrantType) UsesTokenEndpoint() bool { - switch t { - case OAuthAuthzCodeGrant: - return true - case OAuthOwnerPasswordGrant: - return true - default: - // We'll default to false so that we don't impose any requirements - // on any grant type keywords that might be defined for future - // versions of Terraform. - return false - } -} - -// OAuthGrantTypeSet represents a set of OAuthGrantType values. -type OAuthGrantTypeSet map[OAuthGrantType]struct{} - -// NewOAuthGrantTypeSet constructs a new grant type set from the given list -// of grant type keyword strings. Any duplicates in the list are ignored. -func NewOAuthGrantTypeSet(keywords ...string) OAuthGrantTypeSet { - ret := make(OAuthGrantTypeSet, len(keywords)) - for _, kw := range keywords { - ret[OAuthGrantType(kw)] = struct{}{} - } - return ret -} - -// Has returns true if the given grant type is in the receiving set. -func (s OAuthGrantTypeSet) Has(t OAuthGrantType) bool { - _, ok := s[t] - return ok -} - -// RequiresAuthorizationEndpoint returns true if any of the grant types in -// the set are known to require an authorization endpoint. -func (s OAuthGrantTypeSet) RequiresAuthorizationEndpoint() bool { - for t := range s { - if t.UsesAuthorizationEndpoint() { - return true - } - } - return false -} - -// RequiresTokenEndpoint returns true if any of the grant types in -// the set are known to require a token endpoint. -func (s OAuthGrantTypeSet) RequiresTokenEndpoint() bool { - for t := range s { - if t.UsesTokenEndpoint() { - return true - } - } - return false -} - -// GoString implements fmt.GoStringer. -func (s OAuthGrantTypeSet) GoString() string { - var buf strings.Builder - i := 0 - buf.WriteString("disco.NewOAuthGrantTypeSet(") - for t := range s { - if i > 0 { - buf.WriteString(", ") - } - fmt.Fprintf(&buf, "%q", string(t)) - i++ - } - buf.WriteString(")") - return buf.String() -} diff --git a/vendor/github.com/hashicorp/terraform/LICENSE b/vendor/github.com/hashicorp/terraform/LICENSE deleted file mode 100644 index c33dcc7c..00000000 --- a/vendor/github.com/hashicorp/terraform/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. 
“Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. 
Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. 
You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/terraform/addrs/count_attr.go b/vendor/github.com/hashicorp/terraform/addrs/count_attr.go deleted file mode 100644 index 90a5faf0..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/count_attr.go +++ /dev/null @@ -1,12 +0,0 @@ -package addrs - -// CountAttr is the address of an attribute of the "count" object in -// the interpolation scope, like "count.index". -type CountAttr struct { - referenceable - Name string -} - -func (ca CountAttr) String() string { - return "count." + ca.Name -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/doc.go b/vendor/github.com/hashicorp/terraform/addrs/doc.go deleted file mode 100644 index 46093314..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Package addrs contains types that represent "addresses", which are -// references to specific objects within a Terraform configuration or -// state. -// -// All addresses have string representations based on HCL traversal syntax -// which should be used in the user-interface, and also in-memory -// representations that can be used internally. -// -// For object types that exist within Terraform modules a pair of types is -// used. The "local" part of the address is represented by a type, and then -// an absolute path to that object in the context of its module is represented -// by a type of the same name with an "Abs" prefix added, for "absolute". -// -// All types within this package should be treated as immutable, even if this -// is not enforced by the Go compiler. It is always an implementation error -// to modify an address object in-place after it is initially constructed. -package addrs diff --git a/vendor/github.com/hashicorp/terraform/addrs/for_each_attr.go b/vendor/github.com/hashicorp/terraform/addrs/for_each_attr.go deleted file mode 100644 index 7a638503..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/for_each_attr.go +++ /dev/null @@ -1,12 +0,0 @@ -package addrs - -// ForEachAttr is the address of an attribute referencing the current "for_each" object in -// the interpolation scope, addressed using the "each" keyword, ex. "each.key" and "each.value" -type ForEachAttr struct { - referenceable - Name string -} - -func (f ForEachAttr) String() string { - return "each." + f.Name -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/input_variable.go b/vendor/github.com/hashicorp/terraform/addrs/input_variable.go deleted file mode 100644 index 975c72f1..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/input_variable.go +++ /dev/null @@ -1,50 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// InputVariable is the address of an input variable. -type InputVariable struct { - referenceable - Name string -} - -func (v InputVariable) String() string { - return "var." + v.Name -} - -// Absolute converts the receiver into an absolute address within the given -// module instance. -func (v InputVariable) Absolute(m ModuleInstance) AbsInputVariableInstance { - return AbsInputVariableInstance{ - Module: m, - Variable: v, - } -} - -// AbsInputVariableInstance is the address of an input variable within a -// particular module instance. 
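-// -// As a rough usage sketch (the module and variable names here are invented for illustration): -// -//	v := InputVariable{Name: "region"} -//	addr := v.Absolute(RootModuleInstance.Child("network", NoKey)) -//	// addr.String() == "module.network.var.region"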
-type AbsInputVariableInstance struct { - Module ModuleInstance - Variable InputVariable -} - -// InputVariable returns the absolute address of the input variable of the -// given name inside the receiving module instance. -func (m ModuleInstance) InputVariable(name string) AbsInputVariableInstance { - return AbsInputVariableInstance{ - Module: m, - Variable: InputVariable{ - Name: name, - }, - } -} - -func (v AbsInputVariableInstance) String() string { - if len(v.Module) == 0 { - return v.Variable.String() - } - - return fmt.Sprintf("%s.%s", v.Module.String(), v.Variable.String()) -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/instance_key.go b/vendor/github.com/hashicorp/terraform/addrs/instance_key.go deleted file mode 100644 index ff128be5..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/instance_key.go +++ /dev/null @@ -1,135 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" -) - -// InstanceKey represents the key of an instance within an object that -// contains multiple instances due to using "count" or "for_each" arguments -// in configuration. -// -// IntKey and StringKey are the two implementations of this type. No other -// implementations are allowed. The single instance of an object that _isn't_ -// using "count" or "for_each" is represented by NoKey, which is a nil -// InstanceKey. -type InstanceKey interface { - instanceKeySigil() - String() string - - // Value returns the cty.Value of the appropriate type for the InstanceKey - // value. - Value() cty.Value -} - -// ParseInstanceKey returns the instance key corresponding to the given value, -// which must be known and non-null. -// -// If an unknown or null value is provided then this function will panic. This -// function is intended to deal with the values that would naturally be found -// in an hcl.TraverseIndex, which (when parsed from source, at least) can never -// contain unknown or null values. -func ParseInstanceKey(key cty.Value) (InstanceKey, error) { - switch key.Type() { - case cty.String: - return StringKey(key.AsString()), nil - case cty.Number: - var idx int - err := gocty.FromCtyValue(key, &idx) - return IntKey(idx), err - default: - return NoKey, fmt.Errorf("either a string or an integer is required") - } -} - -// NoKey represents the absence of an InstanceKey, for the single instance -// of a configuration object that does not use "count" or "for_each" at all. -var NoKey InstanceKey - -// IntKey is the InstanceKey representation for integer indices, as -// used when the "count" argument is specified or if for_each is used with -// a sequence type. -type IntKey int - -func (k IntKey) instanceKeySigil() { -} - -func (k IntKey) String() string { - return fmt.Sprintf("[%d]", int(k)) -} - -func (k IntKey) Value() cty.Value { - return cty.NumberIntVal(int64(k)) -} - -// StringKey is the InstanceKey representation for string indices, as -// used when the "for_each" argument is specified with a map or object type. -type StringKey string - -func (k StringKey) instanceKeySigil() { -} - -func (k StringKey) String() string { - // FIXME: This isn't _quite_ right because Go's quoted string syntax is - // slightly different than HCL's, but we'll accept it for now. - return fmt.Sprintf("[%q]", string(k)) -} - -func (k StringKey) Value() cty.Value { - return cty.StringVal(string(k)) -} - -// InstanceKeyLess returns true if the first given instance key i should sort -// before the second key j, and false otherwise.
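-// -// For example, a brief illustrative sketch of the resulting ordering: -// -//	InstanceKeyLess(NoKey, IntKey(0))          // true: NoKey sorts first -//	InstanceKeyLess(IntKey(1), IntKey(2))      // true: numeric order -//	InstanceKeyLess(IntKey(9), StringKey("a")) // true: int keys sort before string keys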
-func InstanceKeyLess(i, j InstanceKey) bool { - iTy := instanceKeyType(i) - jTy := instanceKeyType(j) - - switch { - case i == j: - return false - case i == NoKey: - return true - case j == NoKey: - return false - case iTy != jTy: - // The ordering here is arbitrary except that we want NoKeyType - // to sort before the others, so we'll just use the enum values - // of InstanceKeyType here (where NoKey is zero, sorting before - // any other). - return uint32(iTy) < uint32(jTy) - case iTy == IntKeyType: - return int(i.(IntKey)) < int(j.(IntKey)) - case iTy == StringKeyType: - return string(i.(StringKey)) < string(j.(StringKey)) - default: - // Shouldn't be possible to get down here in practice, since the - // above is exhaustive. - return false - } -} - -func instanceKeyType(k InstanceKey) InstanceKeyType { - if _, ok := k.(StringKey); ok { - return StringKeyType - } - if _, ok := k.(IntKey); ok { - return IntKeyType - } - return NoKeyType -} - -// InstanceKeyType represents the different types of instance key that are -// supported. Usually it is sufficient to simply type-assert an InstanceKey -// value to either IntKey or StringKey, but this type and its values can be -// used to represent the types themselves, rather than specific values -// of those types. -type InstanceKeyType rune - -const ( - NoKeyType InstanceKeyType = 0 - IntKeyType InstanceKeyType = 'I' - StringKeyType InstanceKeyType = 'S' -) diff --git a/vendor/github.com/hashicorp/terraform/addrs/local_value.go b/vendor/github.com/hashicorp/terraform/addrs/local_value.go deleted file mode 100644 index 61a07b9c..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/local_value.go +++ /dev/null @@ -1,48 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// LocalValue is the address of a local value. -type LocalValue struct { - referenceable - Name string -} - -func (v LocalValue) String() string { - return "local." + v.Name -} - -// Absolute converts the receiver into an absolute address within the given -// module instance. -func (v LocalValue) Absolute(m ModuleInstance) AbsLocalValue { - return AbsLocalValue{ - Module: m, - LocalValue: v, - } -} - -// AbsLocalValue is the absolute address of a local value within a module instance. -type AbsLocalValue struct { - Module ModuleInstance - LocalValue LocalValue -} - -// LocalValue returns the absolute address of a local value of the given -// name within the receiving module instance. -func (m ModuleInstance) LocalValue(name string) AbsLocalValue { - return AbsLocalValue{ - Module: m, - LocalValue: LocalValue{ - Name: name, - }, - } -} - -func (v AbsLocalValue) String() string { - if len(v.Module) == 0 { - return v.LocalValue.String() - } - return fmt.Sprintf("%s.%s", v.Module.String(), v.LocalValue.String()) -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/module.go b/vendor/github.com/hashicorp/terraform/addrs/module.go deleted file mode 100644 index 2a3ffe3f..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/module.go +++ /dev/null @@ -1,140 +0,0 @@ -package addrs - -import ( - "strings" -) - -// Module is an address for a module call within configuration. This is -// the static counterpart of ModuleInstance, representing a traversal through -// the static module call tree in configuration and does not take into account -// the potentially-multiple instances of a module that might be created by -// "count" and "for_each" arguments within those calls. 
-// -// This type should be used only in very specialized cases when working with -// the static module call tree. Type ModuleInstance is appropriate in more cases. -// -// Although Module is a slice, it should be treated as immutable after creation. -type Module []string - -// RootModule is the module address representing the root of the static module -// call tree, which is also the zero value of Module. -// -// Note that this is not the root of the dynamic module tree, which is instead -// represented by RootModuleInstance. -var RootModule Module - -// IsRoot returns true if the receiver is the address of the root module, -// or false otherwise. -func (m Module) IsRoot() bool { - return len(m) == 0 -} - -func (m Module) String() string { - if len(m) == 0 { - return "" - } - var steps []string - for _, s := range m { - steps = append(steps, "module", s) - } - return strings.Join(steps, ".") -} - -func (m Module) Equal(other Module) bool { - return m.String() == other.String() -} - -func (m Module) targetableSigil() { - // Module is targetable -} - -// TargetContains implements Targetable for Module by returning true if the given other -// address either matches the receiver, is a sub-module-instance of the -// receiver, or is a targetable absolute address within a module that -// is contained within the receiver. -func (m Module) TargetContains(other Targetable) bool { - switch to := other.(type) { - - case Module: - if len(to) < len(m) { - // Can't be contained if the path is shorter - return false - } - // Other is contained if its steps match for the length of our own path. - for i, ourStep := range m { - otherStep := to[i] - if ourStep != otherStep { - return false - } - } - // If we fall out here then the prefix matched, so it's contained. - return true - - case ModuleInstance: - return m.TargetContains(to.Module()) - - case ConfigResource: - return m.TargetContains(to.Module) - - case AbsResource: - return m.TargetContains(to.Module) - - case AbsResourceInstance: - return m.TargetContains(to.Module) - - default: - return false - } -} - -// Child returns the address of a child call in the receiver, identified by the -// given name. -func (m Module) Child(name string) Module { - ret := make(Module, 0, len(m)+1) - ret = append(ret, m...) - return append(ret, name) -} - -// Parent returns the address of the parent module of the receiver, or the -// receiver itself if there is no parent (if it's the root module address). -func (m Module) Parent() Module { - if len(m) == 0 { - return m - } - return m[:len(m)-1] -} - -// Call returns the module call address that corresponds to the given module -// instance, along with the address of the module that contains it. -// -// There is no call for the root module, so this method will panic if called -// on the root module address. -// -// In practice, this just turns the last element of the receiver into a -// ModuleCall and then returns a slice of the receiver that excludes that -// last part. This is just a convenience for situations where a call address -// is required, such as when dealing with *Reference and Referenceable values. -func (m Module) Call() (Module, ModuleCall) { - if len(m) == 0 { - panic("cannot produce ModuleCall for root module") - } - - caller, callName := m[:len(m)-1], m[len(m)-1] - return caller, ModuleCall{ - Name: callName, - } -} - -// Ancestors returns a slice containing the receiver and all of its ancestor -// modules, all the way up to (and including) the root module.
The result is -// ordered by depth, with the root module always first. -// -// Since the result always includes the root module, a caller may choose to -// ignore it by slicing the result with [1:]. -func (m Module) Ancestors() []Module { - ret := make([]Module, 0, len(m)+1) - for i := 0; i <= len(m); i++ { - ret = append(ret, m[:i]) - } - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/module_call.go b/vendor/github.com/hashicorp/terraform/addrs/module_call.go deleted file mode 100644 index 02163ef7..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/module_call.go +++ /dev/null @@ -1,102 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// ModuleCall is the address of a call from the current module to a child -// module. -// -// There is no "Abs" version of ModuleCall because an absolute module path -// is represented by ModuleInstance. -type ModuleCall struct { - referenceable - Name string -} - -func (c ModuleCall) String() string { - return "module." + c.Name -} - -// Instance returns the address of an instance of the receiver identified by -// the given key. -func (c ModuleCall) Instance(key InstanceKey) ModuleCallInstance { - return ModuleCallInstance{ - Call: c, - Key: key, - } -} - -// ModuleCallInstance is the address of one instance of a module created from -// a module call, which might create multiple instances using "count" or -// "for_each" arguments. -type ModuleCallInstance struct { - referenceable - Call ModuleCall - Key InstanceKey -} - -func (c ModuleCallInstance) String() string { - if c.Key == NoKey { - return c.Call.String() - } - return fmt.Sprintf("module.%s%s", c.Call.Name, c.Key) -} - -// ModuleInstance returns the address of the module instance that corresponds -// to the receiving call instance when resolved in the given calling module. -// In other words, it returns the child module instance that the receiving -// call instance creates. -func (c ModuleCallInstance) ModuleInstance(caller ModuleInstance) ModuleInstance { - return caller.Child(c.Call.Name, c.Key) -} - -// Output returns the absolute address of an output of the receiver identified by its -// name. -func (c ModuleCallInstance) Output(name string) AbsModuleCallOutput { - return AbsModuleCallOutput{ - Call: c, - Name: name, - } -} - -// ModuleCallOutput is the address of a named output and its associated -// ModuleCall, which may expand into multiple module instances. -type ModuleCallOutput struct { - referenceable - Call ModuleCall - Name string -} - -func (m ModuleCallOutput) String() string { - return fmt.Sprintf("%s.%s", m.Call.String(), m.Name) -} - -// AbsModuleCallOutput is the address of a particular named output produced by -// an instance of a module call. -type AbsModuleCallOutput struct { - referenceable - Call ModuleCallInstance - Name string -} - -// ModuleCallOutput returns the referenceable ModuleCallOutput for this -// particular instance. -func (co AbsModuleCallOutput) ModuleCallOutput() ModuleCallOutput { - return ModuleCallOutput{ - Call: co.Call.Call, - Name: co.Name, - } -} - -func (co AbsModuleCallOutput) String() string { - return fmt.Sprintf("%s.%s", co.Call.String(), co.Name) -} - -// AbsOutputValue returns the absolute output value address that corresponds -// to the receiving module call output address, once resolved in the given -// calling module.
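-// -// For example, a sketch with invented names: -// -//	co := ModuleCall{Name: "vpc"}.Instance(NoKey).Output("subnet_id") -//	abs := co.AbsOutputValue(RootModuleInstance) -//	// abs.String() == "module.vpc.output.subnet_id"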
-func (co AbsModuleCallOutput) AbsOutputValue(caller ModuleInstance) AbsOutputValue { - moduleAddr := co.Call.ModuleInstance(caller) - return moduleAddr.OutputValue(co.Name) -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/module_instance.go b/vendor/github.com/hashicorp/terraform/addrs/module_instance.go deleted file mode 100644 index 49cbf786..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/module_instance.go +++ /dev/null @@ -1,459 +0,0 @@ -package addrs - -import ( - "bytes" - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" - - "github.com/hashicorp/terraform/tfdiags" -) - -// ModuleInstance is an address for a particular module instance within the -// dynamic module tree. This is an extension of the static traversals -// represented by type Module that deals with the possibility of a single -// module call producing multiple instances via the "count" and "for_each" -// arguments. -// -// Although ModuleInstance is a slice, it should be treated as immutable after -// creation. -type ModuleInstance []ModuleInstanceStep - -var ( - _ Targetable = ModuleInstance(nil) -) - -func ParseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) { - mi, remain, diags := parseModuleInstancePrefix(traversal) - if len(remain) != 0 { - if len(remain) == len(traversal) { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid module instance address", - Detail: "A module instance address must begin with \"module.\".", - Subject: remain.SourceRange().Ptr(), - }) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid module instance address", - Detail: "The module instance address is followed by additional invalid content.", - Subject: remain.SourceRange().Ptr(), - }) - } - } - return mi, diags -} - -// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance -// that takes a string and parses it with the HCL native syntax traversal parser -// before interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseModuleInstance. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// then the returned address is invalid. 
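-// -// A minimal usage sketch (the address literal is invented for illustration): -// -//	addr, diags := ParseModuleInstanceStr(`module.network[0].module.subnet["a"]`) -//	if !diags.HasErrors() { -//		// addr.String() == `module.network[0].module.subnet["a"]` -//	}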
-func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return nil, diags - } - - addr, addrDiags := ParseModuleInstance(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} - -func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) { - remain := traversal - var mi ModuleInstance - var diags tfdiags.Diagnostics - - for len(remain) > 0 { - var next string - switch tt := remain[0].(type) { - case hcl.TraverseRoot: - next = tt.Name - case hcl.TraverseAttr: - next = tt.Name - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Module address prefix must be followed by dot and then a name.", - Subject: remain[0].SourceRange().Ptr(), - }) - break - } - - if next != "module" { - break - } - - kwRange := remain[0].SourceRange() - remain = remain[1:] - // If we have the prefix "module" then we should be followed by a - // module call name, as an attribute, and then optionally an index step - // giving the instance key. - if len(remain) == 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Prefix \"module.\" must be followed by a module name.", - Subject: &kwRange, - }) - break - } - - var moduleName string - switch tt := remain[0].(type) { - case hcl.TraverseAttr: - moduleName = tt.Name - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Prefix \"module.\" must be followed by a module name.", - Subject: remain[0].SourceRange().Ptr(), - }) - break - } - remain = remain[1:] - step := ModuleInstanceStep{ - Name: moduleName, - } - - if len(remain) > 0 { - if idx, ok := remain[0].(hcl.TraverseIndex); ok { - remain = remain[1:] - - switch idx.Key.Type() { - case cty.String: - step.InstanceKey = StringKey(idx.Key.AsString()) - case cty.Number: - var idxInt int - err := gocty.FromCtyValue(idx.Key, &idxInt) - if err == nil { - step.InstanceKey = IntKey(idxInt) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: fmt.Sprintf("Invalid module index: %s.", err), - Subject: idx.SourceRange().Ptr(), - }) - } - default: - // Should never happen, because no other types are allowed in traversal indices. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Invalid module key: must be either a string or an integer.", - Subject: idx.SourceRange().Ptr(), - }) - } - } - } - - mi = append(mi, step) - } - - var retRemain hcl.Traversal - if len(remain) > 0 { - retRemain = make(hcl.Traversal, len(remain)) - copy(retRemain, remain) - // The first element here might be either a TraverseRoot or a - // TraverseAttr, depending on whether we had a module address on the - // front. To make life easier for callers, we'll normalize to always - // start with a TraverseRoot.
- if tt, ok := retRemain[0].(hcl.TraverseAttr); ok { - retRemain[0] = hcl.TraverseRoot{ - Name: tt.Name, - SrcRange: tt.SrcRange, - } - } - } - - return mi, retRemain, diags -} - -// UnkeyedInstanceShim is a shim method for converting a Module address to the -// equivalent ModuleInstance address that assumes that no modules have -// keyed instances. -// -// This is a temporary allowance for the fact that Terraform does not presently -// support "count" and "for_each" on modules, and thus graph building code that -// derives graph nodes from configuration must just assume unkeyed modules -// in order to construct the graph. At a later time when "count" and "for_each" -// support is added for modules, all callers of this method will need to be -// reworked to allow for keyed module instances. -func (m Module) UnkeyedInstanceShim() ModuleInstance { - path := make(ModuleInstance, len(m)) - for i, name := range m { - path[i] = ModuleInstanceStep{Name: name} - } - return path -} - -// ModuleInstanceStep is a single traversal step through the dynamic module -// tree. It is used only as part of ModuleInstance. -type ModuleInstanceStep struct { - Name string - InstanceKey InstanceKey -} - -// RootModuleInstance is the module instance address representing the root -// module, which is also the zero value of ModuleInstance. -var RootModuleInstance ModuleInstance - -// IsRoot returns true if the receiver is the address of the root module instance, -// or false otherwise. -func (m ModuleInstance) IsRoot() bool { - return len(m) == 0 -} - -// Child returns the address of a child module instance of the receiver, -// identified by the given name and key. -func (m ModuleInstance) Child(name string, key InstanceKey) ModuleInstance { - ret := make(ModuleInstance, 0, len(m)+1) - ret = append(ret, m...) - return append(ret, ModuleInstanceStep{ - Name: name, - InstanceKey: key, - }) -} - -// Parent returns the address of the parent module instance of the receiver, or -// the receiver itself if there is no parent (if it's the root module address). -func (m ModuleInstance) Parent() ModuleInstance { - if len(m) == 0 { - return m - } - return m[:len(m)-1] -} - -// String returns a string representation of the receiver, in the format used -// within e.g. user-provided resource addresses. -// -// The address of the root module has the empty string as its representation. -func (m ModuleInstance) String() string { - var buf bytes.Buffer - sep := "" - for _, step := range m { - buf.WriteString(sep) - buf.WriteString("module.") - buf.WriteString(step.Name) - if step.InstanceKey != NoKey { - buf.WriteString(step.InstanceKey.String()) - } - sep = "." - } - return buf.String() -} - -// Equal returns true if the receiver and the given other value -// contain the exact same parts. -func (m ModuleInstance) Equal(o ModuleInstance) bool { - return m.String() == o.String() -} - -// Less returns true if the receiver should sort before the given other value -// in a sorted list of addresses. -func (m ModuleInstance) Less(o ModuleInstance) bool { - if len(m) != len(o) { - // Shorter path sorts first. - return len(m) < len(o) - } - - for i := range m { - mS, oS := m[i], o[i] - switch { - case mS.Name != oS.Name: - return mS.Name < oS.Name - case mS.InstanceKey != oS.InstanceKey: - return InstanceKeyLess(mS.InstanceKey, oS.InstanceKey) - } - } - - return false -} - -// Ancestors returns a slice containing the receiver and all of its ancestor -// module instances, all the way up to (and including) the root module.
-// The result is ordered by depth, with the root module always first. -// -// Since the result always includes the root module, a caller may choose to -// ignore it by slicing the result with [1:]. -func (m ModuleInstance) Ancestors() []ModuleInstance { - ret := make([]ModuleInstance, 0, len(m)+1) - for i := 0; i <= len(m); i++ { - ret = append(ret, m[:i]) - } - return ret -} - -// IsAncestor returns true if the receiver is an ancestor of the given -// other value. -func (m ModuleInstance) IsAncestor(o ModuleInstance) bool { - // Longer or equal sized paths mean the receiver cannot - // be an ancestor of the given module instance. - if len(m) >= len(o) { - return false - } - - for i, ms := range m { - if ms.Name != o[i].Name { - return false - } - if ms.InstanceKey != NoKey && ms.InstanceKey != o[i].InstanceKey { - return false - } - } - - return true -} - -// Call returns the module call address that corresponds to the given module -// instance, along with the address of the module instance that contains it. -// -// There is no call for the root module, so this method will panic if called -// on the root module address. -// -// A single module call can produce potentially many module instances, so the -// result discards any instance key that might be present on the last step -// of the instance. To retain this, use CallInstance instead. -// -// In practice, this just turns the last element of the receiver into a -// ModuleCall and then returns a slice of the receiver that excludes that -// last part. This is just a convenience for situations where a call address -// is required, such as when dealing with *Reference and Referenceable values. -func (m ModuleInstance) Call() (ModuleInstance, ModuleCall) { - if len(m) == 0 { - panic("cannot produce ModuleCall for root module") - } - - inst, lastStep := m[:len(m)-1], m[len(m)-1] - return inst, ModuleCall{ - Name: lastStep.Name, - } -} - -// CallInstance returns the module call instance address that corresponds to -// the given module instance, along with the address of the module instance -// that contains it. -// -// There is no call for the root module, so this method will panic if called -// on the root module address. -// -// In practice, this just turns the last element of the receiver into a -// ModuleCallInstance and then returns a slice of the receiver that excludes -// that last part. This is just a convenience for situations where a call -// address is required, such as when dealing with *Reference and Referenceable -// values. -func (m ModuleInstance) CallInstance() (ModuleInstance, ModuleCallInstance) { - if len(m) == 0 { - panic("cannot produce ModuleCallInstance for root module") - } - - inst, lastStep := m[:len(m)-1], m[len(m)-1] - return inst, ModuleCallInstance{ - Call: ModuleCall{ - Name: lastStep.Name, - }, - Key: lastStep.InstanceKey, - } -} - -// TargetContains implements Targetable by returning true if the given other -// address either matches the receiver, is a sub-module-instance of the -// receiver, or is a targetable absolute address within a module that -// is contained within the receiver. -func (m ModuleInstance) TargetContains(other Targetable) bool { - switch to := other.(type) { - case Module: - if len(to) < len(m) { - // Can't be contained if the path is shorter - return false - } - // Other is contained if its steps match for the length of our own path. - for i, ourStep := range m { - otherStep := to[i] - - // We can't contain an entire module if we have a specific instance - // key.
The case of NoKey is OK because this address is either - // meant to address an unexpanded module, or a single instance of - // that module, and both of those are covered in full by the - // Module address. - if ourStep.InstanceKey != NoKey { - return false - } - - if ourStep.Name != otherStep { - return false - } - } - // If we fall out here then the prefix matched, so it's contained. - return true - - case ModuleInstance: - if len(to) < len(m) { - return false - } - for i, ourStep := range m { - otherStep := to[i] - if ourStep != otherStep { - return false - } - } - return true - - case ConfigResource: - return m.TargetContains(to.Module) - - case AbsResource: - return m.TargetContains(to.Module) - - case AbsResourceInstance: - return m.TargetContains(to.Module) - - default: - return false - } -} - -// Module returns the address of the module that this instance is an instance -// of. -func (m ModuleInstance) Module() Module { - if len(m) == 0 { - return nil - } - ret := make(Module, len(m)) - for i, step := range m { - ret[i] = step.Name - } - return ret -} - -func (m ModuleInstance) targetableSigil() { - // ModuleInstance is targetable -} - -func (s ModuleInstanceStep) String() string { - if s.InstanceKey != NoKey { - return s.Name + s.InstanceKey.String() - } - return s.Name -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/output_value.go b/vendor/github.com/hashicorp/terraform/addrs/output_value.go deleted file mode 100644 index f97eca19..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/output_value.go +++ /dev/null @@ -1,75 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// OutputValue is the address of an output value, in the context of the module -// that is defining it. -// -// This is related to but separate from ModuleCallOutput, which represents -// a module output from the perspective of its parent module. Since output -// values cannot be referenced from the module where they are defined, -// OutputValue is not Referenceable, while ModuleCallOutput is. -type OutputValue struct { - Name string -} - -func (v OutputValue) String() string { - return "output." + v.Name -} - -// Absolute converts the receiver into an absolute address within the given -// module instance. -func (v OutputValue) Absolute(m ModuleInstance) AbsOutputValue { - return AbsOutputValue{ - Module: m, - OutputValue: v, - } -} - -// AbsOutputValue is the absolute address of an output value within a module instance. -// -// This represents an output globally within the namespace of a particular -// configuration. It is related to but separate from ModuleCallOutput, which -// represents a module output from the perspective of its parent module. -type AbsOutputValue struct { - Module ModuleInstance - OutputValue OutputValue -} - -// OutputValue returns the absolute address of an output value of the given -// name within the receiving module instance. -func (m ModuleInstance) OutputValue(name string) AbsOutputValue { - return AbsOutputValue{ - Module: m, - OutputValue: OutputValue{ - Name: name, - }, - } -} - -func (v AbsOutputValue) String() string { - if v.Module.IsRoot() { - return v.OutputValue.String() - } - return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String()) -} - -// ModuleCallOutput converts an AbsOutputValue into an AbsModuleCallOutput, -// returning also the module instance that the AbsModuleCallOutput is relative -// to. -// -// The root module does not have a call, and so this method cannot be used -// with outputs in the root module, and will panic in that case.
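-// -// For example, a sketch using invented names: -// -//	v := RootModuleInstance.Child("vpc", NoKey).OutputValue("cidr") -//	caller, call := v.ModuleCallOutput() -//	// caller.IsRoot() == true, call.String() == "module.vpc.cidr"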
-func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, AbsModuleCallOutput) { - if v.Module.IsRoot() { - panic("ReferenceFromCall used with root module output") - } - - caller, call := v.Module.CallInstance() - return caller, AbsModuleCallOutput{ - Call: call, - Name: v.OutputValue.Name, - } -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go b/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go deleted file mode 100644 index eac77f30..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go +++ /dev/null @@ -1,345 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/tfdiags" -) - -// Reference describes a reference to an address with source location -// information. -type Reference struct { - Subject Referenceable - SourceRange tfdiags.SourceRange - Remaining hcl.Traversal -} - -// ParseRef attempts to extract a referenceable address from the prefix of the -// given traversal, which must be an absolute traversal or this function -// will panic. -// -// If no error diagnostics are returned, the returned reference includes the -// address that was extracted, the source range it was extracted from, and any -// remaining relative traversal that was not consumed as part of the -// reference. -// -// If error diagnostics are returned then the Reference value is invalid and -// must not be used. -func ParseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { - ref, diags := parseRef(traversal) - - // Normalize a little to make life easier for callers. - if ref != nil { - if len(ref.Remaining) == 0 { - ref.Remaining = nil - } - } - - return ref, diags -} - -// ParseRefStr is a helper wrapper around ParseRef that takes a string -// and parses it with the HCL native syntax traversal parser before -// interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseRef. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned reference may be nil or incomplete.
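-// -// A rough sketch of typical use (the reference string is invented): -// -//	ref, diags := ParseRefStr("aws_instance.web[0].id") -//	if !diags.HasErrors() { -//		// ref.Subject is a ResourceInstance for aws_instance.web[0]; -//		// ref.Remaining holds the trailing .id traversal. -//	}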
-func ParseRefStr(str string) (*Reference, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return nil, diags - } - - ref, targetDiags := ParseRef(traversal) - diags = diags.Append(targetDiags) - return ref, diags -} - -func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - root := traversal.RootName() - rootRange := traversal[0].SourceRange() - - switch root { - - case "count": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: CountAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "each": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: ForEachAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "data": - if len(traversal) < 3 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: `The "data" object must be followed by two attribute names: the data source type and the resource name.`, - Subject: traversal.SourceRange().Ptr(), - }) - return nil, diags - } - remain := traversal[1:] // trim off "data" so we can use our shared resource reference parser - return parseResourceRef(DataResourceMode, rootRange, remain) - - case "local": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: LocalValue{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "module": - callName, callRange, remain, diags := parseSingleAttrRef(traversal) - if diags.HasErrors() { - return nil, diags - } - - // A traversal starting with "module" can either be a reference to an - // entire module, or to a single output from a module instance, - // depending on what we find after this introducer. - callInstance := ModuleCallInstance{ - Call: ModuleCall{ - Name: callName, - }, - Key: NoKey, - } - - if len(remain) == 0 { - // Reference to an entire module. Might alternatively be a - // reference to a single instance of a particular module, but the - // caller will need to deal with that ambiguity since we don't have - // enough context here. - return &Reference{ - Subject: callInstance.Call, - SourceRange: tfdiags.SourceRangeFromHCL(callRange), - Remaining: remain, - }, diags - } - - if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { - var err error - callInstance.Key, err = ParseInstanceKey(idxTrav.Key) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid index key", - Detail: fmt.Sprintf("Invalid index for module instance: %s.", err), - Subject: &idxTrav.SrcRange, - }) - return nil, diags - } - remain = remain[1:] - - if len(remain) == 0 { - // Also a reference to an entire module instance, but we have a key - // now. 
- return &Reference{ - Subject: callInstance, - SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, idxTrav.SrcRange)), - Remaining: remain, - }, diags - } - } - - if attrTrav, ok := remain[0].(hcl.TraverseAttr); ok { - remain = remain[1:] - return &Reference{ - Subject: AbsModuleCallOutput{ - Name: attrTrav.Name, - Call: callInstance, - }, - SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, attrTrav.SrcRange)), - Remaining: remain, - }, diags - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: "Module instance objects do not support this operation.", - Subject: remain[0].SourceRange().Ptr(), - }) - return nil, diags - - case "path": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: PathAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "self": - return &Reference{ - Subject: Self, - SourceRange: tfdiags.SourceRangeFromHCL(rootRange), - Remaining: traversal[1:], - }, diags - - case "terraform": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: TerraformAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "var": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: InputVariable{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - default: - return parseResourceRef(ManagedResourceMode, rootRange, traversal) - } -} - -func parseResourceRef(mode ResourceMode, startRange hcl.Range, traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - if len(traversal) < 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: `A reference to a resource type must be followed by at least one attribute access, specifying the resource name.`, - Subject: hcl.RangeBetween(traversal[0].SourceRange(), traversal[len(traversal)-1].SourceRange()).Ptr(), - }) - return nil, diags - } - - var typeName, name string - switch tt := traversal[0].(type) { // Could be either root or attr, depending on our resource mode - case hcl.TraverseRoot: - typeName = tt.Name - case hcl.TraverseAttr: - typeName = tt.Name - default: - // If it isn't a TraverseRoot then it must be a "data" reference. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: `The "data" object does not support this operation.`, - Subject: traversal[0].SourceRange().Ptr(), - }) - return nil, diags - } - - attrTrav, ok := traversal[1].(hcl.TraverseAttr) - if !ok { - var what string - switch mode { - case DataResourceMode: - what = "data source" - default: - what = "resource type" - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: fmt.Sprintf(`A reference to a %s must be followed by at least one attribute access, specifying the resource name.`, what), - Subject: traversal[1].SourceRange().Ptr(), - }) - return nil, diags - } - name = attrTrav.Name - rng := hcl.RangeBetween(startRange, attrTrav.SrcRange) - remain := traversal[2:] - - resourceAddr := Resource{ - Mode: mode, - Type: typeName, - Name: name, - } - resourceInstAddr := ResourceInstance{ - Resource: resourceAddr, - Key: NoKey, - } - - if len(remain) == 0 { - // This might actually be a reference to the collection of all instances - // of the resource, but we don't have enough context here to decide - // so we'll let the caller resolve that ambiguity. - return &Reference{ - Subject: resourceAddr, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - }, diags - } - - if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { - var err error - resourceInstAddr.Key, err = ParseInstanceKey(idxTrav.Key) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid index key", - Detail: fmt.Sprintf("Invalid index for resource instance: %s.", err), - Subject: &idxTrav.SrcRange, - }) - return nil, diags - } - remain = remain[1:] - rng = hcl.RangeBetween(rng, idxTrav.SrcRange) - } - - return &Reference{ - Subject: resourceInstAddr, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags -} - -func parseSingleAttrRef(traversal hcl.Traversal) (string, hcl.Range, hcl.Traversal, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - root := traversal.RootName() - rootRange := traversal[0].SourceRange() - - if len(traversal) < 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: fmt.Sprintf("The %q object cannot be accessed directly. Instead, access one of its attributes.", root), - Subject: &rootRange, - }) - return "", hcl.Range{}, nil, diags - } - if attrTrav, ok := traversal[1].(hcl.TraverseAttr); ok { - return attrTrav.Name, hcl.RangeBetween(rootRange, attrTrav.SrcRange), traversal[2:], diags - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: fmt.Sprintf("The %q object does not support this operation.", root), - Subject: traversal[1].SourceRange().Ptr(), - }) - return "", hcl.Range{}, nil, diags -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/parse_target.go b/vendor/github.com/hashicorp/terraform/addrs/parse_target.go deleted file mode 100644 index c308525f..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/parse_target.go +++ /dev/null @@ -1,318 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/tfdiags" -) - -// Target describes a targeted address with source location information. 
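-// -// For example, ParseTargetStr (below) can produce values like this sketch (the address is invented): -// -//	t, diags := ParseTargetStr("module.app.aws_instance.web") -//	if !diags.HasErrors() { -//		// t.Subject is an AbsResource inside module.app -//	}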
-type Target struct { - Subject Targetable - SourceRange tfdiags.SourceRange -} - -// ParseTarget attempts to interpret the given traversal as a targetable -// address. The given traversal must be absolute, or this function will -// panic. -// -// If no error diagnostics are returned, the returned target includes the -// address that was extracted and the source range it was extracted from. -// -// If error diagnostics are returned then the Target value is invalid and -// must not be used. -func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) { - path, remain, diags := parseModuleInstancePrefix(traversal) - if diags.HasErrors() { - return nil, diags - } - - rng := tfdiags.SourceRangeFromHCL(traversal.SourceRange()) - - if len(remain) == 0 { - return &Target{ - Subject: path, - SourceRange: rng, - }, diags - } - - mode := ManagedResourceMode - if remain.RootName() == "data" { - mode = DataResourceMode - remain = remain[1:] - } - - if len(remain) < 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "Resource specification must include a resource type and name.", - Subject: remain.SourceRange().Ptr(), - }) - return nil, diags - } - - var typeName, name string - switch tt := remain[0].(type) { - case hcl.TraverseRoot: - typeName = tt.Name - case hcl.TraverseAttr: - typeName = tt.Name - default: - switch mode { - case ManagedResourceMode: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource type name is required.", - Subject: remain[0].SourceRange().Ptr(), - }) - case DataResourceMode: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A data source name is required.", - Subject: remain[0].SourceRange().Ptr(), - }) - default: - panic("unknown mode") - } - return nil, diags - } - - switch tt := remain[1].(type) { - case hcl.TraverseAttr: - name = tt.Name - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource name is required.", - Subject: remain[1].SourceRange().Ptr(), - }) - return nil, diags - } - - var subject Targetable - remain = remain[2:] - switch len(remain) { - case 0: - subject = path.Resource(mode, typeName, name) - case 1: - if tt, ok := remain[0].(hcl.TraverseIndex); ok { - key, err := ParseInstanceKey(tt.Key) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: fmt.Sprintf("Invalid resource instance key: %s.", err), - Subject: remain[0].SourceRange().Ptr(), - }) - return nil, diags - } - - subject = path.ResourceInstance(mode, typeName, name, key) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "Resource instance key must be given in square brackets.", - Subject: remain[0].SourceRange().Ptr(), - }) - return nil, diags - } - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "Unexpected extra operators after address.", - Subject: remain[1].SourceRange().Ptr(), - }) - return nil, diags - } - - return &Target{ - Subject: subject, - SourceRange: rng, - }, diags -} - -// ParseTargetStr is a helper wrapper around ParseTarget that takes a string -// and parses it with the HCL native syntax traversal parser before -// interpreting it. 
-// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a target string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseTarget. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned target may be nil or incomplete. -func ParseTargetStr(str string) (*Target, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return nil, diags - } - - target, targetDiags := ParseTarget(traversal) - diags = diags.Append(targetDiags) - return target, diags -} - -// ParseAbsResource attempts to interpret the given traversal as an absolute -// resource address, using the same syntax as expected by ParseTarget. -// -// If no error diagnostics are returned, the returned target includes the -// address that was extracted and the source range it was extracted from. -// -// If error diagnostics are returned then the AbsResource value is invalid and -// must not be used. -func ParseAbsResource(traversal hcl.Traversal) (AbsResource, tfdiags.Diagnostics) { - addr, diags := ParseTarget(traversal) - if diags.HasErrors() { - return AbsResource{}, diags - } - - switch tt := addr.Subject.(type) { - - case AbsResource: - return tt, diags - - case AbsResourceInstance: // Catch likely user error with specialized message - // Assume that the last element of the traversal must be the index, - // since that's required for a valid resource instance address. - indexStep := traversal[len(traversal)-1] - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource address is required. This instance key identifies a specific resource instance, which is not expected here.", - Subject: indexStep.SourceRange().Ptr(), - }) - return AbsResource{}, diags - - case ModuleInstance: // Catch likely user error with specialized message - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource address is required here. The module path must be followed by a resource specification.", - Subject: traversal.SourceRange().Ptr(), - }) - return AbsResource{}, diags - - default: // Generic message for other address types - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource address is required here.", - Subject: traversal.SourceRange().Ptr(), - }) - return AbsResource{}, diags - - } -} - -// ParseAbsResourceStr is a helper wrapper around ParseAbsResource that takes a -// string and parses it with the HCL native syntax traversal parser before -// interpreting it. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned address may be incomplete. 
-// -// Since this function has no context about the source of the given string, -// any returned diagnostics will not have meaningful source location -// information. -func ParseAbsResourceStr(str string) (AbsResource, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return AbsResource{}, diags - } - - addr, addrDiags := ParseAbsResource(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} - -// ParseAbsResourceInstance attempts to interpret the given traversal as an -// absolute resource instance address, using the same syntax as expected by -// ParseTarget. -// -// If no error diagnostics are returned, the returned target includes the -// address that was extracted and the source range it was extracted from. -// -// If error diagnostics are returned then the AbsResource value is invalid and -// must not be used. -func ParseAbsResourceInstance(traversal hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) { - addr, diags := ParseTarget(traversal) - if diags.HasErrors() { - return AbsResourceInstance{}, diags - } - - switch tt := addr.Subject.(type) { - - case AbsResource: - return tt.Instance(NoKey), diags - - case AbsResourceInstance: - return tt, diags - - case ModuleInstance: // Catch likely user error with specialized message - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource instance address is required here. The module path must be followed by a resource instance specification.", - Subject: traversal.SourceRange().Ptr(), - }) - return AbsResourceInstance{}, diags - - default: // Generic message for other address types - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource address is required here.", - Subject: traversal.SourceRange().Ptr(), - }) - return AbsResourceInstance{}, diags - - } -} - -// ParseAbsResourceInstanceStr is a helper wrapper around -// ParseAbsResourceInstance that takes a string and parses it with the HCL -// native syntax traversal parser before interpreting it. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned address may be incomplete. -// -// Since this function has no context about the source of the given string, -// any returned diagnostics will not have meaningful source location -// information. 
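-// -// A brief sketch (the address literal is invented for illustration): -// -//	addr, diags := ParseAbsResourceInstanceStr(`aws_instance.web["a"]`) -//	if !diags.HasErrors() { -//		// addr names instance "a" of aws_instance.web in the root module -//	}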
-func ParseAbsResourceInstanceStr(str string) (AbsResourceInstance, tfdiags.Diagnostics) {
-	var diags tfdiags.Diagnostics
-
-	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
-	diags = diags.Append(parseDiags)
-	if parseDiags.HasErrors() {
-		return AbsResourceInstance{}, diags
-	}
-
-	addr, addrDiags := ParseAbsResourceInstance(traversal)
-	diags = diags.Append(addrDiags)
-	return addr, diags
-}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/path_attr.go b/vendor/github.com/hashicorp/terraform/addrs/path_attr.go
deleted file mode 100644
index cfc13f4b..00000000
--- a/vendor/github.com/hashicorp/terraform/addrs/path_attr.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package addrs
-
-// PathAttr is the address of an attribute of the "path" object in
-// the interpolation scope, like "path.module".
-type PathAttr struct {
-	referenceable
-	Name string
-}
-
-func (pa PathAttr) String() string {
-	return "path." + pa.Name
-}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/provider.go b/vendor/github.com/hashicorp/terraform/addrs/provider.go
deleted file mode 100644
index bc264d61..00000000
--- a/vendor/github.com/hashicorp/terraform/addrs/provider.go
+++ /dev/null
@@ -1,419 +0,0 @@
-package addrs
-
-import (
-	"fmt"
-	"strings"
-
-	"golang.org/x/net/idna"
-
-	"github.com/hashicorp/hcl/v2"
-	svchost "github.com/hashicorp/terraform-svchost"
-	"github.com/hashicorp/terraform/tfdiags"
-)
-
-// Provider encapsulates a single provider type. A provider is identified by
-// its type name, the namespace it belongs to, and the hostname of the source
-// registry that serves that namespace.
-type Provider struct {
-	Type      string
-	Namespace string
-	Hostname  svchost.Hostname
-}
-
-// DefaultRegistryHost is the hostname used for provider addresses that do
-// not have an explicit hostname.
-const DefaultRegistryHost = svchost.Hostname("registry.terraform.io")
-
-// BuiltInProviderHost is the pseudo-hostname used for the "built-in" provider
-// namespace. Built-in provider addresses must also have their namespace set
-// to BuiltInProviderNamespace in order to be considered as built-in.
-const BuiltInProviderHost = svchost.Hostname("terraform.io")
-
-// BuiltInProviderNamespace is the provider namespace used for "built-in"
-// providers. Built-in provider addresses must also have their hostname
-// set to BuiltInProviderHost in order to be considered as built-in.
-//
-// This namespace is literally named "builtin", in the hope that users
-// who see FQNs containing this will be able to infer the way in which they are
-// special, even if they haven't encountered the concept formally yet.
-const BuiltInProviderNamespace = "builtin"
-
-// LegacyProviderNamespace is the special string used in the Namespace field
-// of type Provider to mark a legacy provider address. This special namespace
-// value would normally be invalid, and can be used only when the hostname is
-// DefaultRegistryHost because that host owns the mapping from legacy name to
-// FQN.
-const LegacyProviderNamespace = "-"
-
-// String returns an FQN string, intended for use in machine-readable output.
-func (pt Provider) String() string {
-	if pt.IsZero() {
-		panic("called String on zero-value addrs.Provider")
-	}
-	return pt.Hostname.ForDisplay() + "/" + pt.Namespace + "/" + pt.Type
-}
-
-// ForDisplay returns a user-friendly FQN string, simplified for readability. If
-// the provider is using the default hostname, the hostname is omitted.
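-//
-// For example (an illustrative sketch, not part of the original file):
-//
-//	p := NewDefaultProvider("aws")
-//	p.String()     // "registry.terraform.io/hashicorp/aws"
-//	p.ForDisplay() // "hashicorp/aws"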
-func (pt Provider) ForDisplay() string {
-	if pt.IsZero() {
-		panic("called ForDisplay on zero-value addrs.Provider")
-	}
-
-	if pt.Hostname == DefaultRegistryHost {
-		return pt.Namespace + "/" + pt.Type
-	}
-	return pt.Hostname.ForDisplay() + "/" + pt.Namespace + "/" + pt.Type
-}
-
-// NewProvider constructs a provider address from its parts, and normalizes
-// the namespace and type parts to lowercase using unicode case folding rules
-// so that resulting addrs.Provider values can be compared using standard
-// Go equality rules (==).
-//
-// The hostname is given as a svchost.Hostname, which is required by the
-// contract of that type to have already been normalized for equality testing.
-//
-// This function will panic if the given namespace or type name are not valid.
-// When accepting namespace or type values from outside the program, use
-// ParseProviderPart first to check that the given value is valid.
-func NewProvider(hostname svchost.Hostname, namespace, typeName string) Provider {
-	if namespace == LegacyProviderNamespace {
-		// Legacy provider addresses must always be created via
-		// NewLegacyProvider so that we can use static analysis to find
-		// codepaths still working with those.
-		panic("attempt to create legacy provider address using NewProvider; use NewLegacyProvider instead")
-	}
-
-	return Provider{
-		Type:      MustParseProviderPart(typeName),
-		Namespace: MustParseProviderPart(namespace),
-		Hostname:  hostname,
-	}
-}
-
-// ImpliedProviderForUnqualifiedType represents the rules for inferring what
-// provider FQN a user intended when only a naked type name is available.
-//
-// For all except the type name "terraform" this returns a so-called "default"
-// provider, which is under the registry.terraform.io/hashicorp/ namespace.
-//
-// As a special case, the string "terraform" maps to
-// "terraform.io/builtin/terraform" because that is the more likely user
-// intent than the now-unmaintained "registry.terraform.io/hashicorp/terraform"
-// which remains only for compatibility with older Terraform versions.
-func ImpliedProviderForUnqualifiedType(typeName string) Provider {
-	switch typeName {
-	case "terraform":
-		// Note for future maintainers: any additional strings we add here
-		// as implied to be builtin must never also be used as provider names
-		// in the registry.terraform.io/hashicorp/... namespace, because
-		// otherwise older versions of Terraform could implicitly select
-		// the registry name instead of the internal one.
-		return NewBuiltInProvider(typeName)
-	default:
-		return NewDefaultProvider(typeName)
-	}
-}
-
-// NewDefaultProvider returns the default address of a HashiCorp-maintained,
-// Registry-hosted provider.
-func NewDefaultProvider(name string) Provider {
-	return Provider{
-		Type:      MustParseProviderPart(name),
-		Namespace: "hashicorp",
-		Hostname:  DefaultRegistryHost,
-	}
-}
-
-// NewBuiltInProvider returns the address of a "built-in" provider. See
-// the docs for Provider.IsBuiltIn for more information.
-func NewBuiltInProvider(name string) Provider {
-	return Provider{
-		Type:      MustParseProviderPart(name),
-		Namespace: BuiltInProviderNamespace,
-		Hostname:  BuiltInProviderHost,
-	}
-}
-
-// NewLegacyProvider returns a mock address for a provider.
-// This will be removed when ProviderType is fully integrated.
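-//
-// For example (an illustrative sketch, not part of the original file):
-//
-//	p := NewLegacyProvider("aws")
-//	p.String() // "registry.terraform.io/-/aws"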
-func NewLegacyProvider(name string) Provider { - return Provider{ - // We intentionally don't normalize and validate the legacy names, - // because existing code expects legacy provider names to pass through - // verbatim, even if not compliant with our new naming rules. - Type: name, - Namespace: LegacyProviderNamespace, - Hostname: DefaultRegistryHost, - } -} - -// LegacyString returns the provider type, which is frequently used -// interchangeably with provider name. This function can and should be removed -// when provider type is fully integrated. As a safeguard for future -// refactoring, this function panics if the Provider is not a legacy provider. -func (pt Provider) LegacyString() string { - if pt.IsZero() { - panic("called LegacyString on zero-value addrs.Provider") - } - if pt.Namespace != LegacyProviderNamespace { - panic(pt.String() + " is not a legacy addrs.Provider") - } - return pt.Type -} - -// IsZero returns true if the receiver is the zero value of addrs.Provider. -// -// The zero value is not a valid addrs.Provider and calling other methods on -// such a value is likely to either panic or otherwise misbehave. -func (pt Provider) IsZero() bool { - return pt == Provider{} -} - -// IsBuiltIn returns true if the receiver is the address of a "built-in" -// provider. That is, a provider under terraform.io/builtin/ which is -// included as part of the Terraform binary itself rather than one to be -// installed from elsewhere. -// -// These are ignored by the provider installer because they are assumed to -// already be available without any further installation. -func (pt Provider) IsBuiltIn() bool { - return pt.Hostname == BuiltInProviderHost && pt.Namespace == BuiltInProviderNamespace -} - -// LessThan returns true if the receiver should sort before the other given -// address in an ordered list of provider addresses. -// -// This ordering is an arbitrary one just to allow deterministic results from -// functions that would otherwise have no natural ordering. It's subject -// to change in future. -func (pt Provider) LessThan(other Provider) bool { - switch { - case pt.Hostname != other.Hostname: - return pt.Hostname < other.Hostname - case pt.Namespace != other.Namespace: - return pt.Namespace < other.Namespace - default: - return pt.Type < other.Type - } -} - -// IsLegacy returns true if the provider is a legacy-style provider -func (pt Provider) IsLegacy() bool { - if pt.IsZero() { - panic("called IsLegacy() on zero-value addrs.Provider") - } - - return pt.Hostname == DefaultRegistryHost && pt.Namespace == LegacyProviderNamespace - -} - -// IsDefault returns true if the provider is a default hashicorp provider -func (pt Provider) IsDefault() bool { - if pt.IsZero() { - panic("called IsDefault() on zero-value addrs.Provider") - } - - return pt.Hostname == DefaultRegistryHost && pt.Namespace == "hashicorp" -} - -// Equals returns true if the receiver and other provider have the same attributes. -func (pt Provider) Equals(other Provider) bool { - return pt == other -} - -// ParseProviderSourceString parses the source attribute and returns a provider. -// This is intended primarily to parse the FQN-like strings returned by -// terraform-config-inspect. 
-//
-// The following are valid source string formats:
-//	name
-//	namespace/name
-//	hostname/namespace/name
-func ParseProviderSourceString(str string) (Provider, tfdiags.Diagnostics) {
-	var ret Provider
-	var diags tfdiags.Diagnostics
-
-	// split the source string into individual components
-	parts := strings.Split(str, "/")
-	if len(parts) == 0 || len(parts) > 3 {
-		diags = diags.Append(&hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  "Invalid provider source string",
-			Detail:   `The "source" attribute must be in the format "[hostname/][namespace/]name"`,
-		})
-		return ret, diags
-	}
-
-	// check for an invalid empty string in any part
-	for i := range parts {
-		if parts[i] == "" {
-			diags = diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Invalid provider source string",
-				Detail:   `The "source" attribute must be in the format "[hostname/][namespace/]name"`,
-			})
-			return ret, diags
-		}
-	}
-
-	// check the 'name' portion, which is always the last part
-	givenName := parts[len(parts)-1]
-	name, err := ParseProviderPart(givenName)
-	if err != nil {
-		diags = diags.Append(&hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  "Invalid provider type",
-			Detail:   fmt.Sprintf(`Invalid provider type %q in source %q: %s`, givenName, str, err),
-		})
-		return ret, diags
-	}
-	ret.Type = name
-	ret.Hostname = DefaultRegistryHost
-
-	if len(parts) == 1 {
-		return NewDefaultProvider(parts[0]), diags
-	}
-
-	if len(parts) >= 2 {
-		// the namespace is always the second-to-last part
-		givenNamespace := parts[len(parts)-2]
-		if givenNamespace == LegacyProviderNamespace {
-			// For now we're tolerating legacy provider addresses until we've
-			// finished updating the rest of the codebase to no longer use them,
-			// or else we'd get errors round-tripping through legacy subsystems.
-			ret.Namespace = LegacyProviderNamespace
-		} else {
-			namespace, err := ParseProviderPart(givenNamespace)
-			if err != nil {
-				diags = diags.Append(&hcl.Diagnostic{
-					Severity: hcl.DiagError,
-					Summary:  "Invalid provider namespace",
-					Detail:   fmt.Sprintf(`Invalid provider namespace %q in source %q: %s`, givenNamespace, str, err),
-				})
-				return Provider{}, diags
-			}
-			ret.Namespace = namespace
-		}
-	}
-
-	// Final case: 3 parts
-	if len(parts) == 3 {
-		// the hostname is always the first part in a three-part source string
-		hn, err := svchost.ForComparison(parts[0])
-		if err != nil {
-			diags = diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Invalid provider source hostname",
-				Detail:   fmt.Sprintf(`Invalid provider source hostname %q in source %q: %s`, parts[0], str, err),
-			})
-			return Provider{}, diags
-		}
-		ret.Hostname = hn
-	}
-
-	if ret.Namespace == LegacyProviderNamespace && ret.Hostname != DefaultRegistryHost {
-		// Legacy provider addresses must always be on the default registry
-		// host, because the default registry host decides what actual FQN
-		// each one maps to.
-		diags = diags.Append(&hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  "Invalid provider namespace",
-			Detail:   "The legacy provider namespace \"-\" can be used only with hostname " + DefaultRegistryHost.ForDisplay() + ".",
-		})
-		return Provider{}, diags
-	}
-
-	return ret, diags
-}
-
-// MustParseProviderSourceString is a wrapper around ParseProviderSourceString that panics if
-// it returns an error.
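-//
-// For example (an illustrative sketch, not part of the original file):
-//
-//	p := MustParseProviderSourceString("hashicorp/aws")
-//	// p.Hostname == DefaultRegistryHost
-//	// p.Namespace == "hashicorp"
-//	// p.Type == "aws"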
-func MustParseProviderSourceString(str string) Provider { - result, diags := ParseProviderSourceString(str) - if diags.HasErrors() { - panic(diags.Err().Error()) - } - return result -} - -// ParseProviderPart processes an addrs.Provider namespace or type string -// provided by an end-user, producing a normalized version if possible or -// an error if the string contains invalid characters. -// -// A provider part is processed in the same way as an individual label in a DNS -// domain name: it is transformed to lowercase per the usual DNS case mapping -// and normalization rules and may contain only letters, digits, and dashes. -// Additionally, dashes may not appear at the start or end of the string. -// -// These restrictions are intended to allow these names to appear in fussy -// contexts such as directory/file names on case-insensitive filesystems, -// repository names on GitHub, etc. We're using the DNS rules in particular, -// rather than some similar rules defined locally, because the hostname part -// of an addrs.Provider is already a hostname and it's ideal to use exactly -// the same case folding and normalization rules for all of the parts. -// -// In practice a provider type string conventionally does not contain dashes -// either. Such names are permitted, but providers with such type names will be -// hard to use because their resource type names will not be able to contain -// the provider type name and thus each resource will need an explicit provider -// address specified. (A real-world example of such a provider is the -// "google-beta" variant of the GCP provider, which has resource types that -// start with the "google_" prefix instead.) -// -// It's valid to pass the result of this function as the argument to a -// subsequent call, in which case the result will be identical. -func ParseProviderPart(given string) (string, error) { - if len(given) == 0 { - return "", fmt.Errorf("must have at least one character") - } - - // We're going to process the given name using the same "IDNA" library we - // use for the hostname portion, since it already implements the case - // folding rules we want. - // - // The idna library doesn't expose individual label parsing directly, but - // once we've verified it doesn't contain any dots we can just treat it - // like a top-level domain for this library's purposes. - if strings.ContainsRune(given, '.') { - return "", fmt.Errorf("dots are not allowed") - } - - // We don't allow names containing multiple consecutive dashes, just as - // a matter of preference: they look weird, confusing, or incorrect. - // This also, as a side-effect, prevents the use of the "punycode" - // indicator prefix "xn--" that would cause the IDNA library to interpret - // the given name as punycode, because that would be weird and unexpected. - if strings.Contains(given, "--") { - return "", fmt.Errorf("cannot use multiple consecutive dashes") - } - - result, err := idna.Lookup.ToUnicode(given) - if err != nil { - return "", fmt.Errorf("must contain only letters, digits, and dashes, and may not use leading or trailing dashes") - } - - return result, nil -} - -// MustParseProviderPart is a wrapper around ParseProviderPart that panics if -// it returns an error. 
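-//
-// For example (an illustrative sketch, not part of the original file):
-//
-//	MustParseProviderPart("AWS")       // "aws", after DNS-style case folding
-//	MustParseProviderPart("bad--name") // panics: multiple consecutive dashes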
-func MustParseProviderPart(given string) string {
-	result, err := ParseProviderPart(given)
-	if err != nil {
-		panic(err.Error())
-	}
-	return result
-}
-
-// IsProviderPartNormalized compares a given string to the result of ParseProviderPart(string).
-func IsProviderPartNormalized(str string) (bool, error) {
-	normalized, err := ParseProviderPart(str)
-	if err != nil {
-		return false, err
-	}
-	if str == normalized {
-		return true, nil
-	}
-	return false, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/provider_config.go b/vendor/github.com/hashicorp/terraform/addrs/provider_config.go
deleted file mode 100644
index f83ce8d4..00000000
--- a/vendor/github.com/hashicorp/terraform/addrs/provider_config.go
+++ /dev/null
@@ -1,400 +0,0 @@
-package addrs
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/hashicorp/terraform/tfdiags"
-	"github.com/zclconf/go-cty/cty"
-
-	"github.com/hashicorp/hcl/v2"
-	"github.com/hashicorp/hcl/v2/hclsyntax"
-)
-
-// ProviderConfig is an interface type whose dynamic type can be either
-// LocalProviderConfig or AbsProviderConfig, in order to represent situations
-// where a value might either be module-local or absolute but the decision
-// cannot be made until runtime.
-//
-// Where possible, use either LocalProviderConfig or AbsProviderConfig directly
-// instead, to make intent more clear. ProviderConfig can be used only in
-// situations where the recipient of the value has some out-of-band way to
-// determine a "current module" to use if the value turns out to be
-// a LocalProviderConfig.
-//
-// Recipients of non-nil ProviderConfig values that actually need
-// AbsProviderConfig values should call ResolveAbsProviderAddr on the
-// *configs.Config value representing the root module configuration, which
-// handles the translation from local to fully-qualified using mapping tables
-// defined in the configuration.
-//
-// Recipients of a ProviderConfig value can assume it can contain only a
-// LocalProviderConfig value, an AbsProviderConfig value, or nil to represent
-// the absence of a provider config in situations where that is meaningful.
-type ProviderConfig interface {
-	providerConfig()
-}
-
-// LocalProviderConfig is the address of a provider configuration from the
-// perspective of references in a particular module.
-//
-// Finding the corresponding AbsProviderConfig will require looking up the
-// LocalName in the providers table in the module's configuration; there is
-// no syntax-only translation between these types.
-type LocalProviderConfig struct {
-	LocalName string
-
-	// If not empty, Alias identifies which non-default (aliased) provider
-	// configuration this address refers to.
-	Alias string
-}
-
-var _ ProviderConfig = LocalProviderConfig{}
-
-// NewDefaultLocalProviderConfig returns the address of the default (un-aliased)
-// configuration for the provider with the given local type name.
-func NewDefaultLocalProviderConfig(localName string) LocalProviderConfig {
-	return LocalProviderConfig{
-		LocalName: localName,
-	}
-}
-
-// providerConfig implements addrs.ProviderConfig.
-func (pc LocalProviderConfig) providerConfig() {}
-
-func (pc LocalProviderConfig) String() string {
-	if pc.LocalName == "" {
-		// Should never happen; always indicates a bug
-		return "provider.<invalid>"
-	}
-
-	if pc.Alias != "" {
-		return fmt.Sprintf("provider.%s.%s", pc.LocalName, pc.Alias)
-	}
-
-	return "provider."
+ pc.LocalName -} - -// StringCompact is an alternative to String that returns the form that can -// be parsed by ParseProviderConfigCompact, without the "provider." prefix. -func (pc LocalProviderConfig) StringCompact() string { - if pc.Alias != "" { - return fmt.Sprintf("%s.%s", pc.LocalName, pc.Alias) - } - return pc.LocalName -} - -// AbsProviderConfig is the absolute address of a provider configuration -// within a particular module instance. -type AbsProviderConfig struct { - Module Module - Provider Provider - Alias string -} - -var _ ProviderConfig = AbsProviderConfig{} - -// ParseAbsProviderConfig parses the given traversal as an absolute provider -// address. The following are examples of traversals that can be successfully -// parsed as absolute provider configuration addresses: -// -// provider["registry.terraform.io/hashicorp/aws"] -// provider["registry.terraform.io/hashicorp/aws"].foo -// module.bar.provider["registry.terraform.io/hashicorp/aws"] -// module.bar.module.baz.provider["registry.terraform.io/hashicorp/aws"].foo -// -// This type of address is used, for example, to record the relationships -// between resources and provider configurations in the state structure. -// This type of address is not generally used in the UI, except in error -// messages that refer to provider configurations. -func ParseAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags.Diagnostics) { - modInst, remain, diags := parseModuleInstancePrefix(traversal) - var ret AbsProviderConfig - - // Providers cannot resolve within module instances, so verify that there - // are no instance keys in the module path before converting to a Module. - for _, step := range modInst { - if step.InstanceKey != NoKey { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Provider address cannot contain module indexes", - Subject: remain.SourceRange().Ptr(), - }) - return ret, diags - } - } - ret.Module = modInst.Module() - - if len(remain) < 2 || remain.RootName() != "provider" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Provider address must begin with \"provider.\", followed by a provider type name.", - Subject: remain.SourceRange().Ptr(), - }) - return ret, diags - } - if len(remain) > 3 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Extraneous operators after provider configuration alias.", - Subject: hcl.Traversal(remain[3:]).SourceRange().Ptr(), - }) - return ret, diags - } - - if tt, ok := remain[1].(hcl.TraverseIndex); ok { - if !tt.Key.Type().Equals(cty.String) { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "The prefix \"provider.\" must be followed by a provider type name.", - Subject: remain[1].SourceRange().Ptr(), - }) - return ret, diags - } - p, sourceDiags := ParseProviderSourceString(tt.Key.AsString()) - ret.Provider = p - if sourceDiags.HasErrors() { - diags = diags.Append(sourceDiags) - return ret, diags - } - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "The prefix \"provider.\" must be followed by a provider type name.", - Subject: remain[1].SourceRange().Ptr(), - }) - return ret, diags - } - - if len(remain) == 3 { - if tt, ok := 
remain[2].(hcl.TraverseAttr); ok {
-			ret.Alias = tt.Name
-		} else {
-			diags = diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Invalid provider configuration address",
-				Detail:   "Provider type name must be followed by a configuration alias name.",
-				Subject:  remain[2].SourceRange().Ptr(),
-			})
-			return ret, diags
-		}
-	}
-
-	return ret, diags
-}
-
-// ParseAbsProviderConfigStr is a helper wrapper around ParseAbsProviderConfig
-// that takes a string and parses it with the HCL native syntax traversal parser
-// before interpreting it.
-//
-// This should be used only in specialized situations since it will cause the
-// created references to not have any meaningful source location information.
-// If a reference string is coming from a source that should be identified in
-// error messages then the caller should instead parse it directly using a
-// suitable function from the HCL API and pass the traversal itself to
-// ParseAbsProviderConfig.
-//
-// Error diagnostics are returned if either the parsing fails or the analysis
-// of the traversal fails. There is no way for the caller to distinguish the
-// two kinds of diagnostics programmatically. If error diagnostics are returned
-// the returned address is invalid.
-func ParseAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnostics) {
-	var diags tfdiags.Diagnostics
-	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
-	diags = diags.Append(parseDiags)
-	if parseDiags.HasErrors() {
-		return AbsProviderConfig{}, diags
-	}
-	addr, addrDiags := ParseAbsProviderConfig(traversal)
-	diags = diags.Append(addrDiags)
-	return addr, diags
-}
-
-// ParseLegacyAbsProviderConfigStr is a helper wrapper around
-// ParseLegacyAbsProviderConfig that takes a string and parses it with the HCL
-// native syntax traversal parser before interpreting it, with the same
-// caveats about source location information as ParseAbsProviderConfigStr.
-func ParseLegacyAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnostics) {
-	var diags tfdiags.Diagnostics
-
-	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
-	diags = diags.Append(parseDiags)
-	if parseDiags.HasErrors() {
-		return AbsProviderConfig{}, diags
-	}
-
-	addr, addrDiags := ParseLegacyAbsProviderConfig(traversal)
-	diags = diags.Append(addrDiags)
-	return addr, diags
-}
-
-// ParseLegacyAbsProviderConfig parses the given traversal as an absolute
-// provider address. The following are examples of traversals that can be
-// successfully parsed as legacy absolute provider configuration addresses:
-//
-//	provider.aws
-//	provider.aws.foo
-//	module.bar.provider.aws
-//	module.bar.module.baz.provider.aws.foo
-//
-// This type of address is used in legacy state and may appear in state v4 if
-// the provider config addresses have not been normalized to include provider
-// FQN.
-func ParseLegacyAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags.Diagnostics) {
-	modInst, remain, diags := parseModuleInstancePrefix(traversal)
-	var ret AbsProviderConfig
-
-	// Providers cannot resolve within module instances, so verify that there
-	// are no instance keys in the module path before converting to a Module.
-	for _, step := range modInst {
-		if step.InstanceKey != NoKey {
-			diags = diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Invalid provider configuration address",
-				Detail:   "Provider address cannot contain module indexes",
-				Subject:  remain.SourceRange().Ptr(),
-			})
-			return ret, diags
-		}
-	}
-	ret.Module = modInst.Module()
-
-	if len(remain) < 2 || remain.RootName() != "provider" {
-		diags = diags.Append(&hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  "Invalid provider configuration address",
-			Detail:   "Provider address must begin with \"provider.\", followed by a provider type name.",
-			Subject:  remain.SourceRange().Ptr(),
-		})
-		return ret, diags
-	}
-	if len(remain) > 3 {
-		diags = diags.Append(&hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  "Invalid provider configuration address",
-			Detail:   "Extraneous operators after provider configuration alias.",
-			Subject:  hcl.Traversal(remain[3:]).SourceRange().Ptr(),
-		})
-		return ret, diags
-	}
-
-	// We always assume legacy-style providers in legacy state
-	if tt, ok := remain[1].(hcl.TraverseAttr); ok {
-		ret.Provider = NewLegacyProvider(tt.Name)
-	} else {
-		diags = diags.Append(&hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  "Invalid provider configuration address",
-			Detail:   "The prefix \"provider.\" must be followed by a provider type name.",
-			Subject:  remain[1].SourceRange().Ptr(),
-		})
-		return ret, diags
-	}
-
-	if len(remain) == 3 {
-		if tt, ok := remain[2].(hcl.TraverseAttr); ok {
-			ret.Alias = tt.Name
-		} else {
-			diags = diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Invalid provider configuration address",
-				Detail:   "Provider type name must be followed by a configuration alias name.",
-				Subject:  remain[2].SourceRange().Ptr(),
-			})
-			return ret, diags
-		}
-	}
-
-	return ret, diags
-}
-
-// ProviderConfigDefault returns the address of the default provider config of
-// the given type inside the receiving module instance.
-func (m ModuleInstance) ProviderConfigDefault(provider Provider) AbsProviderConfig {
-	return AbsProviderConfig{
-		Module:   m.Module(),
-		Provider: provider,
-	}
-}
-
-// ProviderConfigAliased returns the address of an aliased provider config of
-// the given type and alias inside the receiving module instance.
-func (m ModuleInstance) ProviderConfigAliased(provider Provider, alias string) AbsProviderConfig {
-	return AbsProviderConfig{
-		Module:   m.Module(),
-		Provider: provider,
-		Alias:    alias,
-	}
-}
-
-// providerConfig implements addrs.ProviderConfig.
-func (pc AbsProviderConfig) providerConfig() {}
-
-// Inherited returns an address that the receiving configuration address might
-// inherit from in a parent module. The second bool return value indicates if
-// such inheritance is possible, and thus whether the returned address is valid.
-//
-// Inheritance is possible only for default (un-aliased) providers in modules
-// other than the root module. Even if a valid address is returned, inheritance
-// may not be performed for other reasons, such as if the calling module
-// provided explicit provider configurations within the call for this module.
-// The ProviderTransformer graph transform in the main terraform module has the
-// authoritative logic for provider inheritance, and this method is here mainly
-// just for its benefit.
-func (pc AbsProviderConfig) Inherited() (AbsProviderConfig, bool) {
-	// Can't inherit if we're already in the root.
-	if len(pc.Module) == 0 {
-		return AbsProviderConfig{}, false
-	}
-
-	// Can't inherit if we have an alias.
-	if pc.Alias != "" {
-		return AbsProviderConfig{}, false
-	}
-
-	// Otherwise, we might inherit from a configuration with the same
-	// provider type in the parent module instance.
-	parentMod := pc.Module.Parent()
-	return AbsProviderConfig{
-		Module:   parentMod,
-		Provider: pc.Provider,
-	}, true
-}
-
-// LegacyString returns a legacy-style AbsProviderConfig string and should only be used for legacy state shimming.
-func (pc AbsProviderConfig) LegacyString() string {
-	if pc.Alias != "" {
-		if len(pc.Module) == 0 {
-			return fmt.Sprintf("%s.%s.%s", "provider", pc.Provider.LegacyString(), pc.Alias)
-		} else {
-			return fmt.Sprintf("%s.%s.%s.%s", pc.Module.String(), "provider", pc.Provider.LegacyString(), pc.Alias)
-		}
-	}
-	if len(pc.Module) == 0 {
-		return fmt.Sprintf("%s.%s", "provider", pc.Provider.LegacyString())
-	}
-	return fmt.Sprintf("%s.%s.%s", pc.Module.String(), "provider", pc.Provider.LegacyString())
-}
-
-// String returns a string representation of an AbsProviderConfig in one of the
-// following formats:
-//
-//	provider["example.com/namespace/name"]
-//	provider["example.com/namespace/name"].alias
-//	module.module-name.provider["example.com/namespace/name"]
-//	module.module-name.provider["example.com/namespace/name"].alias
-func (pc AbsProviderConfig) String() string {
-	var parts []string
-	if len(pc.Module) > 0 {
-		parts = append(parts, pc.Module.String())
-	}
-
-	parts = append(parts, fmt.Sprintf("provider[%q]", pc.Provider))
-
-	if pc.Alias != "" {
-		parts = append(parts, pc.Alias)
-	}
-
-	return strings.Join(parts, ".")
-}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/referenceable.go b/vendor/github.com/hashicorp/terraform/addrs/referenceable.go
deleted file mode 100644
index 211083a5..00000000
--- a/vendor/github.com/hashicorp/terraform/addrs/referenceable.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package addrs
-
-// Referenceable is an interface implemented by all address types that can
-// appear as references in configuration language expressions.
-type Referenceable interface {
-	// All implementations of this interface must be covered by the type switch
-	// in lang.Scope.buildEvalContext.
-	referenceableSigil()
-
-	// String produces a string representation of the address that could be
-	// parsed as an HCL traversal and passed to ParseRef to produce an identical
-	// result.
-	String() string
-}
-
-type referenceable struct {
-}
-
-func (r referenceable) referenceableSigil() {
-}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/resource.go b/vendor/github.com/hashicorp/terraform/addrs/resource.go
deleted file mode 100644
index d2592c2a..00000000
--- a/vendor/github.com/hashicorp/terraform/addrs/resource.go
+++ /dev/null
@@ -1,335 +0,0 @@
-package addrs
-
-import (
-	"fmt"
-	"strings"
-)
-
-// Resource is an address for a resource block within configuration, which
-// contains potentially-multiple resource instances if that configuration
-// block uses "count" or "for_each".
-type Resource struct {
-	referenceable
-	Mode ResourceMode
-	Type string
-	Name string
-}
-
-func (r Resource) String() string {
-	switch r.Mode {
-	case ManagedResourceMode:
-		return fmt.Sprintf("%s.%s", r.Type, r.Name)
-	case DataResourceMode:
-		return fmt.Sprintf("data.%s.%s", r.Type, r.Name)
-	default:
-		// Should never happen, but we'll return a string here rather than
-		// crashing just in case it does.
-		return fmt.Sprintf(".%s.%s", r.Type, r.Name)
-	}
-}
-
-func (r Resource) Equal(o Resource) bool {
-	return r.String() == o.String()
-}
-
-// Instance produces the address for a specific instance of the receiver
-// that is identified by the given key.
-func (r Resource) Instance(key InstanceKey) ResourceInstance {
-	return ResourceInstance{
-		Resource: r,
-		Key:      key,
-	}
-}
-
-// Absolute returns an AbsResource from the receiver and the given module
-// instance address.
-func (r Resource) Absolute(module ModuleInstance) AbsResource {
-	return AbsResource{
-		Module:   module,
-		Resource: r,
-	}
-}
-
-// ImpliedProvider returns the implied provider type name, e.g. the "aws" in
-// "aws_instance".
-func (r Resource) ImpliedProvider() string {
-	typeName := r.Type
-	if under := strings.Index(typeName, "_"); under != -1 {
-		typeName = typeName[:under]
-	}
-
-	return typeName
-}
-
-// ResourceInstance is an address for a specific instance of a resource.
-// When a resource is defined in configuration with "count" or "for_each" it
-// produces zero or more instances, which can be addressed using this type.
-type ResourceInstance struct {
-	referenceable
-	Resource Resource
-	Key      InstanceKey
-}
-
-func (r ResourceInstance) ContainingResource() Resource {
-	return r.Resource
-}
-
-func (r ResourceInstance) String() string {
-	if r.Key == NoKey {
-		return r.Resource.String()
-	}
-	return r.Resource.String() + r.Key.String()
-}
-
-func (r ResourceInstance) Equal(o ResourceInstance) bool {
-	return r.String() == o.String()
-}
-
-// Absolute returns an AbsResourceInstance from the receiver and the given module
-// instance address.
-func (r ResourceInstance) Absolute(module ModuleInstance) AbsResourceInstance {
-	return AbsResourceInstance{
-		Module:   module,
-		Resource: r,
-	}
-}
-
-// AbsResource is an absolute address for a resource under a given module path.
-type AbsResource struct {
-	targetable
-	Module   ModuleInstance
-	Resource Resource
-}
-
-// Resource returns the address of a particular resource within the receiver.
-func (m ModuleInstance) Resource(mode ResourceMode, typeName string, name string) AbsResource {
-	return AbsResource{
-		Module: m,
-		Resource: Resource{
-			Mode: mode,
-			Type: typeName,
-			Name: name,
-		},
-	}
-}
-
-// Instance produces the address for a specific instance of the receiver
-// that is identified by the given key.
-func (r AbsResource) Instance(key InstanceKey) AbsResourceInstance {
-	return AbsResourceInstance{
-		Module:   r.Module,
-		Resource: r.Resource.Instance(key),
-	}
-}
-
-// Config returns the unexpanded ConfigResource for this AbsResource.
-func (r AbsResource) Config() ConfigResource {
-	return ConfigResource{
-		Module:   r.Module.Module(),
-		Resource: r.Resource,
-	}
-}
-
-// TargetContains implements Targetable by returning true if the given other
-// address is either equal to the receiver or is an instance of the
-// receiver.
-func (r AbsResource) TargetContains(other Targetable) bool {
-	switch to := other.(type) {
-
-	case AbsResource:
-		// We'll use our stringification as a cheat-ish way to test for equality.
-		return to.String() == r.String()
-
-	case ConfigResource:
-		// if an absolute resource from parsing a target address contains a
-		// ConfigResource, the string representation will match
-		return to.String() == r.String()
-
-	case AbsResourceInstance:
-		return r.TargetContains(to.ContainingResource())
-
-	default:
-		return false
-
-	}
-}
-
-func (r AbsResource) String() string {
-	if len(r.Module) == 0 {
-		return r.Resource.String()
-	}
-	return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String())
-}
-
-func (r AbsResource) Equal(o AbsResource) bool {
-	return r.String() == o.String()
-}
-
-// AbsResourceInstance is an absolute address for a resource instance under a
-// given module path.
-type AbsResourceInstance struct {
-	targetable
-	Module   ModuleInstance
-	Resource ResourceInstance
-}
-
-// ResourceInstance returns the address of a particular resource instance within the receiver.
-func (m ModuleInstance) ResourceInstance(mode ResourceMode, typeName string, name string, key InstanceKey) AbsResourceInstance {
-	return AbsResourceInstance{
-		Module: m,
-		Resource: ResourceInstance{
-			Resource: Resource{
-				Mode: mode,
-				Type: typeName,
-				Name: name,
-			},
-			Key: key,
-		},
-	}
-}
-
-// ContainingResource returns the address of the resource that contains the
-// receiving resource instance. In other words, it discards the key portion
-// of the address to produce an AbsResource value.
-func (r AbsResourceInstance) ContainingResource() AbsResource {
-	return AbsResource{
-		Module:   r.Module,
-		Resource: r.Resource.ContainingResource(),
-	}
-}
-
-// TargetContains implements Targetable by returning true if the given other
-// address is equal to the receiver.
-func (r AbsResourceInstance) TargetContains(other Targetable) bool {
-	switch to := other.(type) {
-
-	// while we currently don't start with an AbsResourceInstance as a target
-	// address, check all resource types for consistency.
-	case AbsResourceInstance:
-		// We'll use our stringification as a cheat-ish way to test for equality.
-		return to.String() == r.String()
-	case ConfigResource:
-		return to.String() == r.String()
-	case AbsResource:
-		return to.String() == r.String()
-
-	default:
-		return false
-
-	}
-}
-
-func (r AbsResourceInstance) String() string {
-	if len(r.Module) == 0 {
-		return r.Resource.String()
-	}
-	return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String())
-}
-
-func (r AbsResourceInstance) Equal(o AbsResourceInstance) bool {
-	return r.String() == o.String()
-}
-
-// Less returns true if the receiver should sort before the given other value
-// in a sorted list of addresses.
-func (r AbsResourceInstance) Less(o AbsResourceInstance) bool {
-	switch {
-
-	case len(r.Module) != len(o.Module):
-		return len(r.Module) < len(o.Module)
-
-	case r.Module.String() != o.Module.String():
-		return r.Module.Less(o.Module)
-
-	case r.Resource.Resource.Mode != o.Resource.Resource.Mode:
-		return r.Resource.Resource.Mode == DataResourceMode
-
-	case r.Resource.Resource.Type != o.Resource.Resource.Type:
-		return r.Resource.Resource.Type < o.Resource.Resource.Type
-
-	case r.Resource.Resource.Name != o.Resource.Resource.Name:
-		return r.Resource.Resource.Name < o.Resource.Resource.Name
-
-	case r.Resource.Key != o.Resource.Key:
-		return InstanceKeyLess(r.Resource.Key, o.Resource.Key)
-
-	default:
-		return false
-
-	}
-}
-
-// ConfigResource is an address for a resource within a configuration.
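-//
-// For example (an illustrative sketch, not part of the original file), the
-// managed resource "aws_instance.web" declared inside the module named "app"
-// would have the string form:
-//
-//	module.app.aws_instance.web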
-type ConfigResource struct {
-	targetable
-	Module   Module
-	Resource Resource
-}
-
-// Resource returns the address of a particular resource within the module.
-func (m Module) Resource(mode ResourceMode, typeName string, name string) ConfigResource {
-	return ConfigResource{
-		Module: m,
-		Resource: Resource{
-			Mode: mode,
-			Type: typeName,
-			Name: name,
-		},
-	}
-}
-
-// Absolute produces the address for the receiver within a specific module instance.
-func (r ConfigResource) Absolute(module ModuleInstance) AbsResource {
-	return AbsResource{
-		Module:   module,
-		Resource: r.Resource,
-	}
-}
-
-// TargetContains implements Targetable by returning true if the given other
-// address is either equal to the receiver or is an instance of the
-// receiver.
-func (r ConfigResource) TargetContains(other Targetable) bool {
-	switch to := other.(type) {
-	case ConfigResource:
-		// We'll use our stringification as a cheat-ish way to test for equality.
-		return to.String() == r.String()
-	case AbsResource:
-		return r.TargetContains(to.Config())
-	case AbsResourceInstance:
-		return r.TargetContains(to.ContainingResource())
-	default:
-		return false
-	}
-}
-
-func (r ConfigResource) String() string {
-	if len(r.Module) == 0 {
-		return r.Resource.String()
-	}
-	return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String())
-}
-
-func (r ConfigResource) Equal(o ConfigResource) bool {
-	return r.String() == o.String()
-}
-
-// ResourceMode defines which lifecycle applies to a given resource. Each
-// resource lifecycle has a slightly different address format.
-type ResourceMode rune
-
-//go:generate go run golang.org/x/tools/cmd/stringer -type ResourceMode
-
-const (
-	// InvalidResourceMode is the zero value of ResourceMode and is not
-	// a valid resource mode.
-	InvalidResourceMode ResourceMode = 0
-
-	// ManagedResourceMode indicates a managed resource, as defined by
-	// "resource" blocks in configuration.
-	ManagedResourceMode ResourceMode = 'M'
-
-	// DataResourceMode indicates a data resource, as defined by
-	// "data" blocks in configuration.
-	DataResourceMode ResourceMode = 'D'
-)
diff --git a/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go b/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go
deleted file mode 100644
index 9bdbdc42..00000000
--- a/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package addrs
-
-import "fmt"
-
-// ResourceInstancePhase is a special kind of reference used only internally
-// during graph building to represent resource instances that are in a
-// non-primary state.
-//
-// Graph nodes can declare themselves referenceable via an instance phase
-// or can declare that they reference an instance phase in order to accommodate
-// secondary graph nodes dealing with, for example, destroy actions.
-//
-// This special reference type cannot be accessed directly by end-users, and
-// should never be shown in the UI.
-type ResourceInstancePhase struct {
-	referenceable
-	ResourceInstance ResourceInstance
-	Phase            ResourceInstancePhaseType
-}
-
-var _ Referenceable = ResourceInstancePhase{}
-
-// Phase returns a special "phase address" for the receiving instance. See the
-// documentation of ResourceInstancePhase for the limited situations where this
-// is intended to be used.
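-//
-// For example (an illustrative sketch, not part of the original file), the
-// destroy phase of aws_instance.web[0] renders as:
-//
-//	aws_instance.web[0]#destroy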
-func (r ResourceInstance) Phase(rpt ResourceInstancePhaseType) ResourceInstancePhase {
-	return ResourceInstancePhase{
-		ResourceInstance: r,
-		Phase:            rpt,
-	}
-}
-
-// ContainingResource returns an address for the same phase of the resource
-// that this instance belongs to.
-func (rp ResourceInstancePhase) ContainingResource() ResourcePhase {
-	return rp.ResourceInstance.Resource.Phase(rp.Phase)
-}
-
-func (rp ResourceInstancePhase) String() string {
-	// We use a different separator here than usual to ensure that we'll
-	// never conflict with any non-phased resource instance string. This
-	// is intentionally something that would fail parsing with ParseRef,
-	// because this special address type should never be exposed in the UI.
-	return fmt.Sprintf("%s#%s", rp.ResourceInstance, rp.Phase)
-}
-
-// ResourceInstancePhaseType is an enumeration used with ResourceInstancePhase.
-type ResourceInstancePhaseType string
-
-const (
-	// ResourceInstancePhaseDestroy represents the "destroy" phase of a
-	// resource instance.
-	ResourceInstancePhaseDestroy ResourceInstancePhaseType = "destroy"
-
-	// ResourceInstancePhaseDestroyCBD is similar to ResourceInstancePhaseDestroy
-	// but is used for resources that have "create_before_destroy" set, thus
-	// requiring a different dependency ordering.
-	ResourceInstancePhaseDestroyCBD ResourceInstancePhaseType = "destroy-cbd"
-)
-
-func (rpt ResourceInstancePhaseType) String() string {
-	return string(rpt)
-}
-
-// ResourcePhase is a special kind of reference used only internally
-// during graph building to represent resources that are in a
-// non-primary state.
-//
-// Graph nodes can declare themselves referenceable via a resource phase
-// or can declare that they reference a resource phase in order to accommodate
-// secondary graph nodes dealing with, for example, destroy actions.
-//
-// Since resources (as opposed to instances) aren't actually phased, this
-// address type is used only as an approximation during initial construction
-// of the resource-oriented plan graph, under the assumption that resource
-// instances with ResourceInstancePhase addresses will be created in dynamic
-// subgraphs during the graph walk.
-//
-// This special reference type cannot be accessed directly by end-users, and
-// should never be shown in the UI.
-type ResourcePhase struct {
-	referenceable
-	Resource Resource
-	Phase    ResourceInstancePhaseType
-}
-
-var _ Referenceable = ResourcePhase{}
-
-// Phase returns a special "phase address" for the receiving resource. See the
-// documentation of ResourceInstancePhase for the limited situations where this
-// is intended to be used.
-func (r Resource) Phase(rpt ResourceInstancePhaseType) ResourcePhase {
-	return ResourcePhase{
-		Resource: r,
-		Phase:    rpt,
-	}
-}
-
-func (rp ResourcePhase) String() string {
-	// We use a different separator here than usual to ensure that we'll
-	// never conflict with any non-phased resource instance string. This
-	// is intentionally something that would fail parsing with ParseRef,
-	// because this special address type should never be exposed in the UI.
-	return fmt.Sprintf("%s#%s", rp.Resource, rp.Phase)
-}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go b/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go
deleted file mode 100644
index 0b5c33f8..00000000
--- a/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by "stringer -type ResourceMode"; DO NOT EDIT.
-
-package addrs
-
-import "strconv"
-
-func _() {
-	// An "invalid array index" compiler error signifies that the constant values have changed.
-	// Re-run the stringer command to generate them again.
-	var x [1]struct{}
-	_ = x[InvalidResourceMode-0]
-	_ = x[ManagedResourceMode-77]
-	_ = x[DataResourceMode-68]
-}
-
-const (
-	_ResourceMode_name_0 = "InvalidResourceMode"
-	_ResourceMode_name_1 = "DataResourceMode"
-	_ResourceMode_name_2 = "ManagedResourceMode"
-)
-
-func (i ResourceMode) String() string {
-	switch {
-	case i == 0:
-		return _ResourceMode_name_0
-	case i == 68:
-		return _ResourceMode_name_1
-	case i == 77:
-		return _ResourceMode_name_2
-	default:
-		return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/self.go b/vendor/github.com/hashicorp/terraform/addrs/self.go
deleted file mode 100644
index 7f24eaf0..00000000
--- a/vendor/github.com/hashicorp/terraform/addrs/self.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package addrs
-
-// Self is the address of the special object "self" that behaves as an alias
-// for a containing object currently in scope.
-const Self selfT = 0
-
-type selfT int
-
-func (s selfT) referenceableSigil() {
-}
-
-func (s selfT) String() string {
-	return "self"
-}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/targetable.go b/vendor/github.com/hashicorp/terraform/addrs/targetable.go
deleted file mode 100644
index 16819a5a..00000000
--- a/vendor/github.com/hashicorp/terraform/addrs/targetable.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package addrs
-
-// Targetable is an interface implemented by all address types that can be
-// used as "targets" for selecting sub-graphs of a graph.
-type Targetable interface {
-	targetableSigil()
-
-	// TargetContains returns true if the receiver is considered to contain
-	// the given other address. Containment, for the purpose of targeting,
-	// means that if a container address is targeted then all of the
-	// addresses within it are also implicitly targeted.
-	//
-	// A targetable address always contains at least itself.
-	TargetContains(other Targetable) bool
-
-	// String produces a string representation of the address that could be
-	// parsed as an HCL traversal and passed to ParseTarget to produce an
-	// identical result.
-	String() string
-}
-
-type targetable struct {
-}
-
-func (r targetable) targetableSigil() {
-}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go b/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go
deleted file mode 100644
index a880182a..00000000
--- a/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package addrs
-
-// TerraformAttr is the address of an attribute of the "terraform" object in
-// the interpolation scope, like "terraform.workspace".
-type TerraformAttr struct {
-	referenceable
-	Name string
-}
-
-func (ta TerraformAttr) String() string {
-	return "terraform." + ta.Name
-}
diff --git a/vendor/github.com/hashicorp/terraform/config/append.go b/vendor/github.com/hashicorp/terraform/config/append.go
deleted file mode 100644
index 9d80c42b..00000000
--- a/vendor/github.com/hashicorp/terraform/config/append.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package config
-
-// Append appends one configuration to another.
-//
-// Append assumes that both configurations will not have
-// conflicting variables, resources, etc. If they do, the
-// problems will be caught in the validation phase.
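-//
-// A minimal sketch of the intended call pattern (illustrative only, not part
-// of the original file):
-//
-//	merged, err := Append(c1, c2)
-//	if err != nil {
-//		// handle the merge error
-//	}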
-// -// It is possible that c1, c2 on their own are not valid. For -// example, a resource in c2 may reference a variable in c1. But -// together, they would be valid. -func Append(c1, c2 *Config) (*Config, error) { - c := new(Config) - - // Append unknown keys, but keep them unique since it is a set - unknowns := make(map[string]struct{}) - for _, k := range c1.unknownKeys { - _, present := unknowns[k] - if !present { - unknowns[k] = struct{}{} - c.unknownKeys = append(c.unknownKeys, k) - } - } - - for _, k := range c2.unknownKeys { - _, present := unknowns[k] - if !present { - unknowns[k] = struct{}{} - c.unknownKeys = append(c.unknownKeys, k) - } - } - - c.Atlas = c1.Atlas - if c2.Atlas != nil { - c.Atlas = c2.Atlas - } - - // merge Terraform blocks - if c1.Terraform != nil { - c.Terraform = c1.Terraform - if c2.Terraform != nil { - c.Terraform.Merge(c2.Terraform) - } - } else { - c.Terraform = c2.Terraform - } - - if len(c1.Modules) > 0 || len(c2.Modules) > 0 { - c.Modules = make( - []*Module, 0, len(c1.Modules)+len(c2.Modules)) - c.Modules = append(c.Modules, c1.Modules...) - c.Modules = append(c.Modules, c2.Modules...) - } - - if len(c1.Outputs) > 0 || len(c2.Outputs) > 0 { - c.Outputs = make( - []*Output, 0, len(c1.Outputs)+len(c2.Outputs)) - c.Outputs = append(c.Outputs, c1.Outputs...) - c.Outputs = append(c.Outputs, c2.Outputs...) - } - - if len(c1.ProviderConfigs) > 0 || len(c2.ProviderConfigs) > 0 { - c.ProviderConfigs = make( - []*ProviderConfig, - 0, len(c1.ProviderConfigs)+len(c2.ProviderConfigs)) - c.ProviderConfigs = append(c.ProviderConfigs, c1.ProviderConfigs...) - c.ProviderConfigs = append(c.ProviderConfigs, c2.ProviderConfigs...) - } - - if len(c1.Resources) > 0 || len(c2.Resources) > 0 { - c.Resources = make( - []*Resource, - 0, len(c1.Resources)+len(c2.Resources)) - c.Resources = append(c.Resources, c1.Resources...) - c.Resources = append(c.Resources, c2.Resources...) - } - - if len(c1.Variables) > 0 || len(c2.Variables) > 0 { - c.Variables = make( - []*Variable, 0, len(c1.Variables)+len(c2.Variables)) - c.Variables = append(c.Variables, c1.Variables...) - c.Variables = append(c.Variables, c2.Variables...) - } - - if len(c1.Locals) > 0 || len(c2.Locals) > 0 { - c.Locals = make([]*Local, 0, len(c1.Locals)+len(c2.Locals)) - c.Locals = append(c.Locals, c1.Locals...) - c.Locals = append(c.Locals, c2.Locals...) - } - - return c, nil -} diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go deleted file mode 100644 index 497effef..00000000 --- a/vendor/github.com/hashicorp/terraform/config/config.go +++ /dev/null @@ -1,1171 +0,0 @@ -// The config package is responsible for loading and validating the -// configuration. -package config - -import ( - "fmt" - "regexp" - "strconv" - "strings" - - hcl2 "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/terraform/helper/hilmapstructure" - "github.com/hashicorp/terraform/plugin/discovery" - "github.com/hashicorp/terraform/tfdiags" - "github.com/mitchellh/reflectwalk" -) - -// NameRegexp is the regular expression that all names (modules, providers, -// resources, etc.) must follow. -var NameRegexp = regexp.MustCompile(`(?i)\A[A-Z0-9_][A-Z0-9\-\_]*\z`) - -// Config is the configuration that comes from loading a collection -// of Terraform templates. -type Config struct { - // Dir is the path to the directory where this configuration was - // loaded from. 
If it is blank, this configuration wasn't loaded from - // any meaningful directory. - Dir string - - Terraform *Terraform - Atlas *AtlasConfig - Modules []*Module - ProviderConfigs []*ProviderConfig - Resources []*Resource - Variables []*Variable - Locals []*Local - Outputs []*Output - - // The fields below can be filled in by loaders for validation - // purposes. - unknownKeys []string -} - -// AtlasConfig is the configuration for building in HashiCorp's Atlas. -type AtlasConfig struct { - Name string - Include []string - Exclude []string -} - -// Module is a module used within a configuration. -// -// This does not represent a module itself, this represents a module -// call-site within an existing configuration. -type Module struct { - Name string - Source string - Version string - Providers map[string]string - RawConfig *RawConfig -} - -// ProviderConfig is the configuration for a resource provider. -// -// For example, Terraform needs to set the AWS access keys for the AWS -// resource provider. -type ProviderConfig struct { - Name string - Alias string - Version string - RawConfig *RawConfig -} - -// A resource represents a single Terraform resource in the configuration. -// A Terraform resource is something that supports some or all of the -// usual "create, read, update, delete" operations, depending on -// the given Mode. -type Resource struct { - Mode ResourceMode // which operations the resource supports - Name string - Type string - RawCount *RawConfig - RawConfig *RawConfig - Provisioners []*Provisioner - Provider string - DependsOn []string - Lifecycle ResourceLifecycle -} - -// Copy returns a copy of this Resource. Helpful for avoiding shared -// config pointers across multiple pieces of the graph that need to do -// interpolation. -func (r *Resource) Copy() *Resource { - n := &Resource{ - Mode: r.Mode, - Name: r.Name, - Type: r.Type, - RawCount: r.RawCount.Copy(), - RawConfig: r.RawConfig.Copy(), - Provisioners: make([]*Provisioner, 0, len(r.Provisioners)), - Provider: r.Provider, - DependsOn: make([]string, len(r.DependsOn)), - Lifecycle: *r.Lifecycle.Copy(), - } - for _, p := range r.Provisioners { - n.Provisioners = append(n.Provisioners, p.Copy()) - } - copy(n.DependsOn, r.DependsOn) - return n -} - -// ResourceLifecycle is used to store the lifecycle tuning parameters -// to allow customized behavior -type ResourceLifecycle struct { - CreateBeforeDestroy bool `mapstructure:"create_before_destroy"` - PreventDestroy bool `mapstructure:"prevent_destroy"` - IgnoreChanges []string `mapstructure:"ignore_changes"` -} - -// Copy returns a copy of this ResourceLifecycle -func (r *ResourceLifecycle) Copy() *ResourceLifecycle { - n := &ResourceLifecycle{ - CreateBeforeDestroy: r.CreateBeforeDestroy, - PreventDestroy: r.PreventDestroy, - IgnoreChanges: make([]string, len(r.IgnoreChanges)), - } - copy(n.IgnoreChanges, r.IgnoreChanges) - return n -} - -// Provisioner is a configured provisioner step on a resource. -type Provisioner struct { - Type string - RawConfig *RawConfig - ConnInfo *RawConfig - - When ProvisionerWhen - OnFailure ProvisionerOnFailure -} - -// Copy returns a copy of this Provisioner -func (p *Provisioner) Copy() *Provisioner { - return &Provisioner{ - Type: p.Type, - RawConfig: p.RawConfig.Copy(), - ConnInfo: p.ConnInfo.Copy(), - When: p.When, - OnFailure: p.OnFailure, - } -} - -// Variable is a module argument defined within the configuration. 
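-//
-// For example (an illustrative sketch, not part of the original file), a
-// legacy-style config block like
-//
-//	variable "region" {
-//	  type        = "string"
-//	  default     = "us-east-1"
-//	  description = "Target region"
-//	}
-//
-// would populate Name, DeclaredType, Default, and Description respectively.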
-type Variable struct { - Name string - DeclaredType string `mapstructure:"type"` - Default interface{} - Description string -} - -// Local is a local value defined within the configuration. -type Local struct { - Name string - RawConfig *RawConfig -} - -// Output is an output defined within the configuration. An output is -// resulting data that is highlighted by Terraform when finished. An -// output marked Sensitive will be output in a masked form following -// application, but will still be available in state. -type Output struct { - Name string - DependsOn []string - Description string - Sensitive bool - RawConfig *RawConfig -} - -// VariableType is the type of value a variable is holding, and returned -// by the Type() function on variables. -type VariableType byte - -const ( - VariableTypeUnknown VariableType = iota - VariableTypeString - VariableTypeList - VariableTypeMap -) - -func (v VariableType) Printable() string { - switch v { - case VariableTypeString: - return "string" - case VariableTypeMap: - return "map" - case VariableTypeList: - return "list" - default: - return "unknown" - } -} - -// ProviderConfigName returns the name of the provider configuration in -// the given mapping that maps to the proper provider configuration -// for this resource. -func ProviderConfigName(t string, pcs []*ProviderConfig) string { - lk := "" - for _, v := range pcs { - k := v.Name - if strings.HasPrefix(t, k) && len(k) > len(lk) { - lk = k - } - } - - return lk -} - -// A unique identifier for this module. -func (r *Module) Id() string { - return fmt.Sprintf("%s", r.Name) -} - -// Count returns the count of this resource. -func (r *Resource) Count() (int, error) { - raw := r.RawCount.Value() - count, ok := r.RawCount.Value().(string) - if !ok { - return 0, fmt.Errorf( - "expected count to be a string or int, got %T", raw) - } - - v, err := strconv.ParseInt(count, 0, 0) - if err != nil { - return 0, fmt.Errorf( - "cannot parse %q as an integer", - count, - ) - } - - return int(v), nil -} - -// A unique identifier for this resource. -func (r *Resource) Id() string { - switch r.Mode { - case ManagedResourceMode: - return fmt.Sprintf("%s.%s", r.Type, r.Name) - case DataResourceMode: - return fmt.Sprintf("data.%s.%s", r.Type, r.Name) - default: - panic(fmt.Errorf("unknown resource mode %s", r.Mode)) - } -} - -// Validate does some basic semantic checking of the configuration. -func (c *Config) Validate() tfdiags.Diagnostics { - if c == nil { - return nil - } - - var diags tfdiags.Diagnostics - - for _, k := range c.unknownKeys { - diags = diags.Append( - fmt.Errorf("Unknown root level key: %s", k), - ) - } - - // Validate the Terraform config - if tf := c.Terraform; tf != nil { - errs := c.Terraform.Validate() - for _, err := range errs { - diags = diags.Append(err) - } - } - - vars := c.InterpolatedVariables() - varMap := make(map[string]*Variable) - for _, v := range c.Variables { - if _, ok := varMap[v.Name]; ok { - diags = diags.Append(fmt.Errorf( - "Variable '%s': duplicate found. 
Variable names must be unique.", - v.Name, - )) - } - - varMap[v.Name] = v - } - - for k, _ := range varMap { - if !NameRegexp.MatchString(k) { - diags = diags.Append(fmt.Errorf( - "variable %q: variable name must match regular expression %s", - k, NameRegexp, - )) - } - } - - for _, v := range c.Variables { - if v.Type() == VariableTypeUnknown { - diags = diags.Append(fmt.Errorf( - "Variable '%s': must be a string or a map", - v.Name, - )) - continue - } - - interp := false - fn := func(n ast.Node) (interface{}, error) { - // LiteralNode is a literal string (outside of a ${ ... } sequence). - // interpolationWalker skips most of these. but in particular it - // visits those that have escaped sequences (like $${foo}) as a - // signal that *some* processing is required on this string. For - // our purposes here though, this is fine and not an interpolation. - if _, ok := n.(*ast.LiteralNode); !ok { - interp = true - } - return "", nil - } - - w := &interpolationWalker{F: fn} - if v.Default != nil { - if err := reflectwalk.Walk(v.Default, w); err == nil { - if interp { - diags = diags.Append(fmt.Errorf( - "variable %q: default may not contain interpolations", - v.Name, - )) - } - } - } - } - - // Check for references to user variables that do not actually - // exist and record those errors. - for source, vs := range vars { - for _, v := range vs { - uv, ok := v.(*UserVariable) - if !ok { - continue - } - - if _, ok := varMap[uv.Name]; !ok { - diags = diags.Append(fmt.Errorf( - "%s: unknown variable referenced: '%s'; define it with a 'variable' block", - source, - uv.Name, - )) - } - } - } - - // Check that all count variables are valid. - for source, vs := range vars { - for _, rawV := range vs { - switch v := rawV.(type) { - case *CountVariable: - if v.Type == CountValueInvalid { - diags = diags.Append(fmt.Errorf( - "%s: invalid count variable: %s", - source, - v.FullKey(), - )) - } - case *PathVariable: - if v.Type == PathValueInvalid { - diags = diags.Append(fmt.Errorf( - "%s: invalid path variable: %s", - source, - v.FullKey(), - )) - } - } - } - } - - // Check that providers aren't declared multiple times and that their - // version constraints, where present, are syntactically valid. - providerSet := make(map[string]bool) - for _, p := range c.ProviderConfigs { - name := p.FullName() - if _, ok := providerSet[name]; ok { - diags = diags.Append(fmt.Errorf( - "provider.%s: multiple configurations present; only one configuration is allowed per provider", - name, - )) - continue - } - - if p.Version != "" { - _, err := discovery.ConstraintStr(p.Version).Parse() - if err != nil { - diags = diags.Append(&hcl2.Diagnostic{ - Severity: hcl2.DiagError, - Summary: "Invalid provider version constraint", - Detail: fmt.Sprintf( - "The value %q given for provider.%s is not a valid version constraint.", - p.Version, name, - ), - // TODO: include a "Subject" source reference in here, - // once the config loader is able to retain source - // location information. 
- }) - } - } - - providerSet[name] = true - } - - // Check that all references to modules are valid - modules := make(map[string]*Module) - dupped := make(map[string]struct{}) - for _, m := range c.Modules { - // Check for duplicates - if _, ok := modules[m.Id()]; ok { - if _, ok := dupped[m.Id()]; !ok { - dupped[m.Id()] = struct{}{} - - diags = diags.Append(fmt.Errorf( - "module %q: module repeated multiple times", - m.Id(), - )) - } - - // Already seen this module, just skip it - continue - } - - modules[m.Id()] = m - - // Check that the source has no interpolations - rc, err := NewRawConfig(map[string]interface{}{ - "root": m.Source, - }) - if err != nil { - diags = diags.Append(fmt.Errorf( - "module %q: module source error: %s", - m.Id(), err, - )) - } else if len(rc.Interpolations) > 0 { - diags = diags.Append(fmt.Errorf( - "module %q: module source cannot contain interpolations", - m.Id(), - )) - } - - // Check that the name matches our regexp - if !NameRegexp.Match([]byte(m.Name)) { - diags = diags.Append(fmt.Errorf( - "module %q: module name must be a letter or underscore followed by only letters, numbers, dashes, and underscores", - m.Id(), - )) - } - - // Check that the configuration can all be strings, lists or maps - raw := make(map[string]interface{}) - for k, v := range m.RawConfig.Raw { - var strVal string - if err := hilmapstructure.WeakDecode(v, &strVal); err == nil { - raw[k] = strVal - continue - } - - var mapVal map[string]interface{} - if err := hilmapstructure.WeakDecode(v, &mapVal); err == nil { - raw[k] = mapVal - continue - } - - var sliceVal []interface{} - if err := hilmapstructure.WeakDecode(v, &sliceVal); err == nil { - raw[k] = sliceVal - continue - } - - diags = diags.Append(fmt.Errorf( - "module %q: argument %s must have a string, list, or map value", - m.Id(), k, - )) - } - - // Check for invalid count variables - for _, v := range m.RawConfig.Variables { - switch v.(type) { - case *CountVariable: - diags = diags.Append(fmt.Errorf( - "module %q: count variables are only valid within resources", - m.Name, - )) - case *SelfVariable: - diags = diags.Append(fmt.Errorf( - "module %q: self variables are only valid within resources", - m.Name, - )) - } - } - - // Update the raw configuration to only contain the string values - m.RawConfig, err = NewRawConfig(raw) - if err != nil { - diags = diags.Append(fmt.Errorf( - "%s: can't initialize configuration: %s", - m.Id(), err, - )) - } - - // check that all named providers actually exist - for _, p := range m.Providers { - if !providerSet[p] { - diags = diags.Append(fmt.Errorf( - "module %q: cannot pass non-existent provider %q", - m.Name, p, - )) - } - } - - } - dupped = nil - - // Check that all variables for modules reference modules that - // exist. 
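The name validation above is driven by NameRegexp; note the pattern also accepts a leading digit, which the accompanying error message does not mention. A standalone probe of the pattern (the wrapper program is illustrative; the regular expression itself is copied from this package):

package main

import (
	"fmt"
	"regexp"
)

// The pattern is copied verbatim from config.NameRegexp.
var nameRegexp = regexp.MustCompile(`(?i)\A[A-Z0-9_][A-Z0-9\-\_]*\z`)

func main() {
	for _, name := range []string{"web_server", "_db", "9lives", "has space", "bad.dot", ""} {
		fmt.Printf("%-12q valid=%v\n", name, nameRegexp.MatchString(name))
	}
}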
- for source, vs := range vars { - for _, v := range vs { - mv, ok := v.(*ModuleVariable) - if !ok { - continue - } - - if _, ok := modules[mv.Name]; !ok { - diags = diags.Append(fmt.Errorf( - "%s: unknown module referenced: %s", - source, mv.Name, - )) - } - } - } - - // Check that all references to resources are valid - resources := make(map[string]*Resource) - dupped = make(map[string]struct{}) - for _, r := range c.Resources { - if _, ok := resources[r.Id()]; ok { - if _, ok := dupped[r.Id()]; !ok { - dupped[r.Id()] = struct{}{} - - diags = diags.Append(fmt.Errorf( - "%s: resource repeated multiple times", - r.Id(), - )) - } - } - - resources[r.Id()] = r - } - dupped = nil - - // Validate resources - for n, r := range resources { - // Verify count variables - for _, v := range r.RawCount.Variables { - switch v.(type) { - case *CountVariable: - diags = diags.Append(fmt.Errorf( - "%s: resource count can't reference count variable: %s", - n, v.FullKey(), - )) - case *SimpleVariable: - diags = diags.Append(fmt.Errorf( - "%s: resource count can't reference variable: %s", - n, v.FullKey(), - )) - - // Good - case *ModuleVariable: - case *ResourceVariable: - case *TerraformVariable: - case *UserVariable: - case *LocalVariable: - - default: - diags = diags.Append(fmt.Errorf( - "Internal error. Unknown type in count var in %s: %T", - n, v, - )) - } - } - - if !r.RawCount.couldBeInteger() { - diags = diags.Append(fmt.Errorf( - "%s: resource count must be an integer", n, - )) - } - r.RawCount.init() - - // Validate DependsOn - for _, err := range c.validateDependsOn(n, r.DependsOn, resources, modules) { - diags = diags.Append(err) - } - - // Verify provisioners - for _, p := range r.Provisioners { - // This validation checks that there are no splat variables - // referencing ourself. This currently is not allowed. - - for _, v := range p.ConnInfo.Variables { - rv, ok := v.(*ResourceVariable) - if !ok { - continue - } - - if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { - diags = diags.Append(fmt.Errorf( - "%s: connection info cannot contain splat variable referencing itself", - n, - )) - break - } - } - - for _, v := range p.RawConfig.Variables { - rv, ok := v.(*ResourceVariable) - if !ok { - continue - } - - if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { - diags = diags.Append(fmt.Errorf( - "%s: connection info cannot contain splat variable referencing itself", - n, - )) - break - } - } - - // Check for invalid when/onFailure values, though this should be - // picked up by the loader we check here just in case. 
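Both provisioner loops above apply the same rejection rule: a connection or config value may not contain a whole-list ("splat") reference back to the resource being provisioned. The rule extracted as a runnable sketch, with illustrative type and function names:

package main

import "fmt"

// resourceVar is a trimmed, illustrative stand-in for ResourceVariable.
type resourceVar struct {
	Type, Name string
	Multi      bool
	Index      int // -1 marks the bare splat form: TYPE.NAME.*.attr
}

// refersToSelfSplat mirrors the check above: reject a whole-list reference
// back to the resource identified by rType and rName.
func refersToSelfSplat(v resourceVar, rType, rName string) bool {
	return v.Multi && v.Index == -1 && v.Type == rType && v.Name == rName
}

func main() {
	v := resourceVar{Type: "aws_instance", Name: "web", Multi: true, Index: -1}
	fmt.Println(refersToSelfSplat(v, "aws_instance", "web")) // true: rejected
	fmt.Println(refersToSelfSplat(v, "aws_instance", "db"))  // false: fine
}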
- if p.When == ProvisionerWhenInvalid { - diags = diags.Append(fmt.Errorf( - "%s: provisioner 'when' value is invalid", n, - )) - } - if p.OnFailure == ProvisionerOnFailureInvalid { - diags = diags.Append(fmt.Errorf( - "%s: provisioner 'on_failure' value is invalid", n, - )) - } - } - - // Verify ignore_changes contains valid entries - for _, v := range r.Lifecycle.IgnoreChanges { - if strings.Contains(v, "*") && v != "*" { - diags = diags.Append(fmt.Errorf( - "%s: ignore_changes does not support using a partial string together with a wildcard: %s", - n, v, - )) - } - } - - // Verify ignore_changes has no interpolations - rc, err := NewRawConfig(map[string]interface{}{ - "root": r.Lifecycle.IgnoreChanges, - }) - if err != nil { - diags = diags.Append(fmt.Errorf( - "%s: lifecycle ignore_changes error: %s", - n, err, - )) - } else if len(rc.Interpolations) > 0 { - diags = diags.Append(fmt.Errorf( - "%s: lifecycle ignore_changes cannot contain interpolations", - n, - )) - } - - // If it is a data source then it can't have provisioners - if r.Mode == DataResourceMode { - if _, ok := r.RawConfig.Raw["provisioner"]; ok { - diags = diags.Append(fmt.Errorf( - "%s: data sources cannot have provisioners", - n, - )) - } - } - } - - for source, vs := range vars { - for _, v := range vs { - rv, ok := v.(*ResourceVariable) - if !ok { - continue - } - - id := rv.ResourceId() - if _, ok := resources[id]; !ok { - diags = diags.Append(fmt.Errorf( - "%s: unknown resource '%s' referenced in variable %s", - source, - id, - rv.FullKey(), - )) - continue - } - } - } - - // Check that all locals are valid - { - found := make(map[string]struct{}) - for _, l := range c.Locals { - if _, ok := found[l.Name]; ok { - diags = diags.Append(fmt.Errorf( - "%s: duplicate local. local value names must be unique", - l.Name, - )) - continue - } - found[l.Name] = struct{}{} - - for _, v := range l.RawConfig.Variables { - if _, ok := v.(*CountVariable); ok { - diags = diags.Append(fmt.Errorf( - "local %s: count variables are only valid within resources", l.Name, - )) - } - } - } - } - - // Check that all outputs are valid - { - found := make(map[string]struct{}) - for _, o := range c.Outputs { - // Verify the output is new - if _, ok := found[o.Name]; ok { - diags = diags.Append(fmt.Errorf( - "output %q: an output of this name was already defined", - o.Name, - )) - continue - } - found[o.Name] = struct{}{} - - var invalidKeys []string - valueKeyFound := false - for k := range o.RawConfig.Raw { - if k == "value" { - valueKeyFound = true - continue - } - if k == "sensitive" { - if sensitive, ok := o.RawConfig.config[k].(bool); ok { - if sensitive { - o.Sensitive = true - } - continue - } - - diags = diags.Append(fmt.Errorf( - "output %q: value for 'sensitive' must be boolean", - o.Name, - )) - continue - } - if k == "description" { - if desc, ok := o.RawConfig.config[k].(string); ok { - o.Description = desc - continue - } - - diags = diags.Append(fmt.Errorf( - "output %q: value for 'description' must be string", - o.Name, - )) - continue - } - invalidKeys = append(invalidKeys, k) - } - if len(invalidKeys) > 0 { - diags = diags.Append(fmt.Errorf( - "output %q: invalid keys: %s", - o.Name, strings.Join(invalidKeys, ", "), - )) - } - if !valueKeyFound { - diags = diags.Append(fmt.Errorf( - "output %q: missing required 'value' argument", o.Name, - )) - } - - for _, v := range o.RawConfig.Variables { - if _, ok := v.(*CountVariable); ok { - diags = diags.Append(fmt.Errorf( - "output %q: count variables are only valid within resources", 
- o.Name, - )) - } - } - - // Detect a common mistake of using a "count"ed resource in - // an output value without using the splat or index form. - // Prior to 0.11 this error was silently ignored, but outputs - // now have their errors checked like all other contexts. - // - // TODO: Remove this in 0.12. - for _, v := range o.RawConfig.Variables { - rv, ok := v.(*ResourceVariable) - if !ok { - continue - } - - // If the variable seems to be treating the referenced - // resource as a singleton (no count specified) then - // we'll check to make sure it is indeed a singleton. - // It's a warning if not. - - if rv.Multi || rv.Index != 0 { - // This reference is treating the resource as a - // multi-resource, so the warning doesn't apply. - continue - } - - for _, r := range c.Resources { - if r.Id() != rv.ResourceId() { - continue - } - - // We test specifically for the raw string "1" here - // because we _do_ want to generate this warning if - // the user has provided an expression that happens - // to return 1 right now, to catch situations where - // a count might dynamically be set to something - // other than 1 and thus splat syntax is still needed - // to be safe. - if r.RawCount != nil && r.RawCount.Raw != nil && r.RawCount.Raw["count"] != "1" && rv.Field != "count" { - diags = diags.Append(tfdiags.SimpleWarning(fmt.Sprintf( - "output %q: must use splat syntax to access %s attribute %q, because it has \"count\" set; use %s.*.%s to obtain a list of the attributes across all instances", - o.Name, - r.Id(), rv.Field, - r.Id(), rv.Field, - ))) - } - } - } - } - } - - // Validate the self variable - for source, rc := range c.rawConfigs() { - // Ignore provisioners. This is a pretty brittle way to do this, - // but better than also repeating all the resources. - if strings.Contains(source, "provision") { - continue - } - - for _, v := range rc.Variables { - if _, ok := v.(*SelfVariable); ok { - diags = diags.Append(fmt.Errorf( - "%s: cannot contain self-reference %s", - source, v.FullKey(), - )) - } - } - } - - return diags -} - -// InterpolatedVariables is a helper that returns a mapping of all the interpolated -// variables within the configuration. This is used to verify references -// are valid in the Validate step. -func (c *Config) InterpolatedVariables() map[string][]InterpolatedVariable { - result := make(map[string][]InterpolatedVariable) - for source, rc := range c.rawConfigs() { - for _, v := range rc.Variables { - result[source] = append(result[source], v) - } - } - return result -} - -// rawConfigs returns all of the RawConfigs that are available keyed by -// a human-friendly source. 
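The splat warning logic above boils down to a small predicate. A hedged sketch of it, with the inputs flattened into plain parameters (parameter names are illustrative, not the vendored fields):

package main

import "fmt"

// needsSplatWarning condenses the rule above: warn when an output reads a
// counted resource as a singleton, unless the count is literally "1" or the
// reference is to the count itself.
func needsSplatWarning(multi bool, index int, rawCount, field string) bool {
	if multi || index != 0 {
		return false // already using splat or index form
	}
	return rawCount != "1" && field != "count"
}

func main() {
	fmt.Println(needsSplatWarning(false, 0, "2", "id"))    // true: warn
	fmt.Println(needsSplatWarning(false, 0, "1", "id"))    // false: singleton
	fmt.Println(needsSplatWarning(true, -1, "2", "id"))    // false: splat form
	fmt.Println(needsSplatWarning(false, 0, "2", "count")) // false: count field
}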
-func (c *Config) rawConfigs() map[string]*RawConfig { - result := make(map[string]*RawConfig) - for _, m := range c.Modules { - source := fmt.Sprintf("module '%s'", m.Name) - result[source] = m.RawConfig - } - - for _, pc := range c.ProviderConfigs { - source := fmt.Sprintf("provider config '%s'", pc.Name) - result[source] = pc.RawConfig - } - - for _, rc := range c.Resources { - source := fmt.Sprintf("resource '%s'", rc.Id()) - result[source+" count"] = rc.RawCount - result[source+" config"] = rc.RawConfig - - for i, p := range rc.Provisioners { - subsource := fmt.Sprintf( - "%s provisioner %s (#%d)", - source, p.Type, i+1) - result[subsource] = p.RawConfig - } - } - - for _, o := range c.Outputs { - source := fmt.Sprintf("output '%s'", o.Name) - result[source] = o.RawConfig - } - - return result -} - -func (c *Config) validateDependsOn( - n string, - v []string, - resources map[string]*Resource, - modules map[string]*Module) []error { - // Verify depends on points to resources that all exist - var errs []error - for _, d := range v { - // Check if we contain interpolations - rc, err := NewRawConfig(map[string]interface{}{ - "value": d, - }) - if err == nil && len(rc.Variables) > 0 { - errs = append(errs, fmt.Errorf( - "%s: depends on value cannot contain interpolations: %s", - n, d)) - continue - } - - // If it is a module, verify it is a module - if strings.HasPrefix(d, "module.") { - name := d[len("module."):] - if _, ok := modules[name]; !ok { - errs = append(errs, fmt.Errorf( - "%s: resource depends on non-existent module '%s'", - n, name)) - } - - continue - } - - // Check resources - if _, ok := resources[d]; !ok { - errs = append(errs, fmt.Errorf( - "%s: resource depends on non-existent resource '%s'", - n, d)) - } - } - - return errs -} - -func (m *Module) mergerName() string { - return m.Id() -} - -func (m *Module) mergerMerge(other merger) merger { - m2 := other.(*Module) - - result := *m - result.Name = m2.Name - result.RawConfig = result.RawConfig.merge(m2.RawConfig) - - if m2.Source != "" { - result.Source = m2.Source - } - - return &result -} - -func (o *Output) mergerName() string { - return o.Name -} - -func (o *Output) mergerMerge(m merger) merger { - o2 := m.(*Output) - - result := *o - result.Name = o2.Name - result.Description = o2.Description - result.RawConfig = result.RawConfig.merge(o2.RawConfig) - result.Sensitive = o2.Sensitive - result.DependsOn = o2.DependsOn - - return &result -} - -func (c *ProviderConfig) GoString() string { - return fmt.Sprintf("*%#v", *c) -} - -func (c *ProviderConfig) FullName() string { - if c.Alias == "" { - return c.Name - } - - return fmt.Sprintf("%s.%s", c.Name, c.Alias) -} - -func (c *ProviderConfig) mergerName() string { - return c.Name -} - -func (c *ProviderConfig) mergerMerge(m merger) merger { - c2 := m.(*ProviderConfig) - - result := *c - result.Name = c2.Name - result.RawConfig = result.RawConfig.merge(c2.RawConfig) - - if c2.Alias != "" { - result.Alias = c2.Alias - } - - return &result -} - -func (r *Resource) mergerName() string { - return r.Id() -} - -func (r *Resource) mergerMerge(m merger) merger { - r2 := m.(*Resource) - - result := *r - result.Mode = r2.Mode - result.Name = r2.Name - result.Type = r2.Type - result.RawConfig = result.RawConfig.merge(r2.RawConfig) - - if r2.RawCount.Value() != "1" { - result.RawCount = r2.RawCount - } - - if len(r2.Provisioners) > 0 { - result.Provisioners = r2.Provisioners - } - - return &result -} - -// Merge merges two variables to create a new third variable. 
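The depends_on resolution rule implemented in validateDependsOn above is simple to state: entries prefixed "module." must name a declared module, and everything else must name a declared resource. A runnable distillation, with plain maps standing in for the real lookups:

package main

import (
	"fmt"
	"strings"
)

// checkDependsOn sketches validateDependsOn's dispatch (names illustrative).
func checkDependsOn(d string, modules, resources map[string]bool) error {
	if strings.HasPrefix(d, "module.") {
		name := strings.TrimPrefix(d, "module.")
		if !modules[name] {
			return fmt.Errorf("depends on non-existent module %q", name)
		}
		return nil
	}
	if !resources[d] {
		return fmt.Errorf("depends on non-existent resource %q", d)
	}
	return nil
}

func main() {
	modules := map[string]bool{"vpc": true}
	resources := map[string]bool{"aws_instance.web": true}
	for _, d := range []string{"module.vpc", "module.db", "aws_instance.web", "aws_instance.db"} {
		fmt.Println(d, "->", checkDependsOn(d, modules, resources))
	}
}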
-func (v *Variable) Merge(v2 *Variable) *Variable { - // Shallow copy the variable - result := *v - - // The names should be the same, but the second name always wins. - result.Name = v2.Name - - if v2.DeclaredType != "" { - result.DeclaredType = v2.DeclaredType - } - if v2.Default != nil { - result.Default = v2.Default - } - if v2.Description != "" { - result.Description = v2.Description - } - - return &result -} - -var typeStringMap = map[string]VariableType{ - "string": VariableTypeString, - "map": VariableTypeMap, - "list": VariableTypeList, -} - -// Type returns the type of variable this is. -func (v *Variable) Type() VariableType { - if v.DeclaredType != "" { - declaredType, ok := typeStringMap[v.DeclaredType] - if !ok { - return VariableTypeUnknown - } - - return declaredType - } - - return v.inferTypeFromDefault() -} - -// ValidateTypeAndDefault ensures that default variable value is compatible -// with the declared type (if one exists), and that the type is one which is -// known to Terraform -func (v *Variable) ValidateTypeAndDefault() error { - // If an explicit type is declared, ensure it is valid - if v.DeclaredType != "" { - if _, ok := typeStringMap[v.DeclaredType]; !ok { - validTypes := []string{} - for k := range typeStringMap { - validTypes = append(validTypes, k) - } - return fmt.Errorf( - "Variable '%s' type must be one of [%s] - '%s' is not a valid type", - v.Name, - strings.Join(validTypes, ", "), - v.DeclaredType, - ) - } - } - - if v.DeclaredType == "" || v.Default == nil { - return nil - } - - if v.inferTypeFromDefault() != v.Type() { - return fmt.Errorf("'%s' has a default value which is not of type '%s' (got '%s')", - v.Name, v.DeclaredType, v.inferTypeFromDefault().Printable()) - } - - return nil -} - -func (v *Variable) mergerName() string { - return v.Name -} - -func (v *Variable) mergerMerge(m merger) merger { - return v.Merge(m.(*Variable)) -} - -// Required tests whether a variable is required or not. -func (v *Variable) Required() bool { - return v.Default == nil -} - -// inferTypeFromDefault contains the logic for the old method of inferring -// variable types - we can also use this for validating that the declared -// type matches the type of the default value -func (v *Variable) inferTypeFromDefault() VariableType { - if v.Default == nil { - return VariableTypeString - } - - var s string - if err := hilmapstructure.WeakDecode(v.Default, &s); err == nil { - v.Default = s - return VariableTypeString - } - - var m map[string]interface{} - if err := hilmapstructure.WeakDecode(v.Default, &m); err == nil { - v.Default = m - return VariableTypeMap - } - - var l []interface{} - if err := hilmapstructure.WeakDecode(v.Default, &l); err == nil { - v.Default = l - return VariableTypeList - } - - return VariableTypeUnknown -} - -func (m ResourceMode) Taintable() bool { - switch m { - case ManagedResourceMode: - return true - case DataResourceMode: - return false - default: - panic(fmt.Errorf("unsupported ResourceMode value %s", m)) - } -} diff --git a/vendor/github.com/hashicorp/terraform/config/config_string.go b/vendor/github.com/hashicorp/terraform/config/config_string.go deleted file mode 100644 index a6933c2a..00000000 --- a/vendor/github.com/hashicorp/terraform/config/config_string.go +++ /dev/null @@ -1,378 +0,0 @@ -package config - -import ( - "bytes" - "fmt" - "sort" - "strings" -) - -// TestString is a Stringer-like function that outputs a string that can -// be used to easily compare multiple Config structures in unit tests. 
-// -// This function has no practical use outside of unit tests and debugging. -func (c *Config) TestString() string { - if c == nil { - return "" - } - - var buf bytes.Buffer - if len(c.Modules) > 0 { - buf.WriteString("Modules:\n\n") - buf.WriteString(modulesStr(c.Modules)) - buf.WriteString("\n\n") - } - - if len(c.Variables) > 0 { - buf.WriteString("Variables:\n\n") - buf.WriteString(variablesStr(c.Variables)) - buf.WriteString("\n\n") - } - - if len(c.ProviderConfigs) > 0 { - buf.WriteString("Provider Configs:\n\n") - buf.WriteString(providerConfigsStr(c.ProviderConfigs)) - buf.WriteString("\n\n") - } - - if len(c.Resources) > 0 { - buf.WriteString("Resources:\n\n") - buf.WriteString(resourcesStr(c.Resources)) - buf.WriteString("\n\n") - } - - if len(c.Outputs) > 0 { - buf.WriteString("Outputs:\n\n") - buf.WriteString(outputsStr(c.Outputs)) - buf.WriteString("\n") - } - - return strings.TrimSpace(buf.String()) -} - -func terraformStr(t *Terraform) string { - result := "" - - if b := t.Backend; b != nil { - result += fmt.Sprintf("backend (%s)\n", b.Type) - - keys := make([]string, 0, len(b.RawConfig.Raw)) - for k, _ := range b.RawConfig.Raw { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - result += fmt.Sprintf(" %s\n", k) - } - } - - return strings.TrimSpace(result) -} - -func modulesStr(ms []*Module) string { - result := "" - order := make([]int, 0, len(ms)) - ks := make([]string, 0, len(ms)) - mapping := make(map[string]int) - for i, m := range ms { - k := m.Id() - ks = append(ks, k) - mapping[k] = i - } - sort.Strings(ks) - for _, k := range ks { - order = append(order, mapping[k]) - } - - for _, i := range order { - m := ms[i] - result += fmt.Sprintf("%s\n", m.Id()) - - ks := make([]string, 0, len(m.RawConfig.Raw)) - for k, _ := range m.RawConfig.Raw { - ks = append(ks, k) - } - sort.Strings(ks) - - result += fmt.Sprintf(" source = %s\n", m.Source) - - for _, k := range ks { - result += fmt.Sprintf(" %s\n", k) - } - } - - return strings.TrimSpace(result) -} - -func outputsStr(os []*Output) string { - ns := make([]string, 0, len(os)) - m := make(map[string]*Output) - for _, o := range os { - ns = append(ns, o.Name) - m[o.Name] = o - } - sort.Strings(ns) - - result := "" - for _, n := range ns { - o := m[n] - - result += fmt.Sprintf("%s\n", n) - - if len(o.DependsOn) > 0 { - result += fmt.Sprintf(" dependsOn\n") - for _, d := range o.DependsOn { - result += fmt.Sprintf(" %s\n", d) - } - } - - if len(o.RawConfig.Variables) > 0 { - result += fmt.Sprintf(" vars\n") - for _, rawV := range o.RawConfig.Variables { - kind := "unknown" - str := rawV.FullKey() - - switch rawV.(type) { - case *ResourceVariable: - kind = "resource" - case *UserVariable: - kind = "user" - } - - result += fmt.Sprintf(" %s: %s\n", kind, str) - } - } - - if o.Description != "" { - result += fmt.Sprintf(" description\n %s\n", o.Description) - } - } - - return strings.TrimSpace(result) -} - -func localsStr(ls []*Local) string { - ns := make([]string, 0, len(ls)) - m := make(map[string]*Local) - for _, l := range ls { - ns = append(ns, l.Name) - m[l.Name] = l - } - sort.Strings(ns) - - result := "" - for _, n := range ns { - l := m[n] - - result += fmt.Sprintf("%s\n", n) - - if len(l.RawConfig.Variables) > 0 { - result += fmt.Sprintf(" vars\n") - for _, rawV := range l.RawConfig.Variables { - kind := "unknown" - str := rawV.FullKey() - - switch rawV.(type) { - case *ResourceVariable: - kind = "resource" - case *UserVariable: - kind = "user" - } - - result += fmt.Sprintf(" %s: %s\n", 
kind, str) - } - } - } - - return strings.TrimSpace(result) -} - -// This helper turns a provider configs field into a deterministic -// string value for comparison in tests. -func providerConfigsStr(pcs []*ProviderConfig) string { - result := "" - - ns := make([]string, 0, len(pcs)) - m := make(map[string]*ProviderConfig) - for _, n := range pcs { - ns = append(ns, n.Name) - m[n.Name] = n - } - sort.Strings(ns) - - for _, n := range ns { - pc := m[n] - - result += fmt.Sprintf("%s\n", n) - - keys := make([]string, 0, len(pc.RawConfig.Raw)) - for k, _ := range pc.RawConfig.Raw { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - result += fmt.Sprintf(" %s\n", k) - } - - if len(pc.RawConfig.Variables) > 0 { - result += fmt.Sprintf(" vars\n") - for _, rawV := range pc.RawConfig.Variables { - kind := "unknown" - str := rawV.FullKey() - - switch rawV.(type) { - case *ResourceVariable: - kind = "resource" - case *UserVariable: - kind = "user" - } - - result += fmt.Sprintf(" %s: %s\n", kind, str) - } - } - } - - return strings.TrimSpace(result) -} - -// This helper turns a resources field into a deterministic -// string value for comparison in tests. -func resourcesStr(rs []*Resource) string { - result := "" - order := make([]int, 0, len(rs)) - ks := make([]string, 0, len(rs)) - mapping := make(map[string]int) - for i, r := range rs { - k := r.Id() - ks = append(ks, k) - mapping[k] = i - } - sort.Strings(ks) - for _, k := range ks { - order = append(order, mapping[k]) - } - - for _, i := range order { - r := rs[i] - result += fmt.Sprintf( - "%s (x%s)\n", - r.Id(), - r.RawCount.Value()) - - ks := make([]string, 0, len(r.RawConfig.Raw)) - for k, _ := range r.RawConfig.Raw { - ks = append(ks, k) - } - sort.Strings(ks) - - for _, k := range ks { - result += fmt.Sprintf(" %s\n", k) - } - - if len(r.Provisioners) > 0 { - result += fmt.Sprintf(" provisioners\n") - for _, p := range r.Provisioners { - when := "" - if p.When != ProvisionerWhenCreate { - when = fmt.Sprintf(" (%s)", p.When.String()) - } - - result += fmt.Sprintf(" %s%s\n", p.Type, when) - - if p.OnFailure != ProvisionerOnFailureFail { - result += fmt.Sprintf(" on_failure = %s\n", p.OnFailure.String()) - } - - ks := make([]string, 0, len(p.RawConfig.Raw)) - for k, _ := range p.RawConfig.Raw { - ks = append(ks, k) - } - sort.Strings(ks) - - for _, k := range ks { - result += fmt.Sprintf(" %s\n", k) - } - } - } - - if len(r.DependsOn) > 0 { - result += fmt.Sprintf(" dependsOn\n") - for _, d := range r.DependsOn { - result += fmt.Sprintf(" %s\n", d) - } - } - - if len(r.RawConfig.Variables) > 0 { - result += fmt.Sprintf(" vars\n") - - ks := make([]string, 0, len(r.RawConfig.Variables)) - for k, _ := range r.RawConfig.Variables { - ks = append(ks, k) - } - sort.Strings(ks) - - for _, k := range ks { - rawV := r.RawConfig.Variables[k] - kind := "unknown" - str := rawV.FullKey() - - switch rawV.(type) { - case *ResourceVariable: - kind = "resource" - case *UserVariable: - kind = "user" - } - - result += fmt.Sprintf(" %s: %s\n", kind, str) - } - } - } - - return strings.TrimSpace(result) -} - -// This helper turns a variables field into a deterministic -// string value for comparison in tests. 
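All of these string helpers share one idiom: because Go randomizes map iteration order, keys are collected and sorted before printing so that test output is deterministic. The idiom in isolation:

package main

import (
	"fmt"
	"sort"
)

func main() {
	raw := map[string]int{"ami": 1, "tags": 2, "instance_type": 3}

	// Collect and sort the keys, exactly as the helpers above do.
	keys := make([]string, 0, len(raw))
	for k := range raw {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys { // stable output regardless of map order
		fmt.Printf("  %s\n", k)
	}
}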
-func variablesStr(vs []*Variable) string { - result := "" - ks := make([]string, 0, len(vs)) - m := make(map[string]*Variable) - for _, v := range vs { - ks = append(ks, v.Name) - m[v.Name] = v - } - sort.Strings(ks) - - for _, k := range ks { - v := m[k] - - required := "" - if v.Required() { - required = " (required)" - } - - declaredType := "" - if v.DeclaredType != "" { - declaredType = fmt.Sprintf(" (%s)", v.DeclaredType) - } - - if v.Default == nil || v.Default == "" { - v.Default = "<>" - } - if v.Description == "" { - v.Description = "<>" - } - - result += fmt.Sprintf( - "%s%s%s\n %v\n %s\n", - k, - required, - declaredType, - v.Default, - v.Description) - } - - return strings.TrimSpace(result) -} diff --git a/vendor/github.com/hashicorp/terraform/config/config_terraform.go b/vendor/github.com/hashicorp/terraform/config/config_terraform.go deleted file mode 100644 index 8535c964..00000000 --- a/vendor/github.com/hashicorp/terraform/config/config_terraform.go +++ /dev/null @@ -1,117 +0,0 @@ -package config - -import ( - "fmt" - "strings" - - "github.com/hashicorp/go-version" - "github.com/mitchellh/hashstructure" -) - -// Terraform is the Terraform meta-configuration that can be present -// in configuration files for configuring Terraform itself. -type Terraform struct { - RequiredVersion string `hcl:"required_version"` // Required Terraform version (constraint) - Backend *Backend // See Backend struct docs -} - -// Validate performs the validation for just the Terraform configuration. -func (t *Terraform) Validate() []error { - var errs []error - - if raw := t.RequiredVersion; raw != "" { - // Check that the value has no interpolations - rc, err := NewRawConfig(map[string]interface{}{ - "root": raw, - }) - if err != nil { - errs = append(errs, fmt.Errorf( - "terraform.required_version: %s", err)) - } else if len(rc.Interpolations) > 0 { - errs = append(errs, fmt.Errorf( - "terraform.required_version: cannot contain interpolations")) - } else { - // Check it is valid - _, err := version.NewConstraint(raw) - if err != nil { - errs = append(errs, fmt.Errorf( - "terraform.required_version: invalid syntax: %s", err)) - } - } - } - - if t.Backend != nil { - errs = append(errs, t.Backend.Validate()...) - } - - return errs -} - -// Merge t with t2. -// Any conflicting fields are overwritten by t2. -func (t *Terraform) Merge(t2 *Terraform) { - if t2.RequiredVersion != "" { - t.RequiredVersion = t2.RequiredVersion - } - - if t2.Backend != nil { - t.Backend = t2.Backend - } -} - -// Backend is the configuration for the "backend" to use with Terraform. -// A backend is responsible for all major behavior of Terraform's core. -// The abstraction layer above the core (the "backend") allows for behavior -// such as remote operation. -type Backend struct { - Type string - RawConfig *RawConfig - - // Hash is a unique hash code representing the original configuration - // of the backend. This won't be recomputed unless Rehash is called. - Hash uint64 -} - -// Rehash returns a unique content hash for this backend's configuration -// as a uint64 value. -func (b *Backend) Rehash() uint64 { - // If we have no backend, the value is zero - if b == nil { - return 0 - } - - // Use hashstructure to hash only our type with the config. - code, err := hashstructure.Hash(map[string]interface{}{ - "type": b.Type, - "config": b.RawConfig.Raw, - }, nil) - - // This should never happen since we have just some basic primitives - // so panic if there is an error. 
- if err != nil { - panic(err) - } - - return code -} - -func (b *Backend) Validate() []error { - if len(b.RawConfig.Interpolations) > 0 { - return []error{fmt.Errorf(strings.TrimSpace(errBackendInterpolations))} - } - - return nil -} - -const errBackendInterpolations = ` -terraform.backend: configuration cannot contain interpolations - -The backend configuration is loaded by Terraform extremely early, before -the core of Terraform can be initialized. This is necessary because the backend -dictates the behavior of that core. The core is what handles interpolation -processing. Because of this, interpolations cannot be used in backend -configuration. - -If you'd like to parameterize backend configuration, we recommend using -partial configuration with the "-backend-config" flag to "terraform init". -` diff --git a/vendor/github.com/hashicorp/terraform/config/config_tree.go b/vendor/github.com/hashicorp/terraform/config/config_tree.go deleted file mode 100644 index 08dc0fe9..00000000 --- a/vendor/github.com/hashicorp/terraform/config/config_tree.go +++ /dev/null @@ -1,43 +0,0 @@ -package config - -// configTree represents a tree of configurations where the root is the -// first file and its children are the configurations it has imported. -type configTree struct { - Path string - Config *Config - Children []*configTree -} - -// Flatten flattens the entire tree down to a single merged Config -// structure. -func (t *configTree) Flatten() (*Config, error) { - // No children is easy: we're already merged! - if len(t.Children) == 0 { - return t.Config, nil - } - - // Depth-first, merge all the children first. - childConfigs := make([]*Config, len(t.Children)) - for i, ct := range t.Children { - c, err := ct.Flatten() - if err != nil { - return nil, err - } - - childConfigs[i] = c - } - - // Merge all the children in order - config := childConfigs[0] - childConfigs = childConfigs[1:] - for _, config2 := range childConfigs { - var err error - config, err = Merge(config, config2) - if err != nil { - return nil, err - } - } - - // Merge the final merged child config with our own - return Merge(config, t.Config) -} diff --git a/vendor/github.com/hashicorp/terraform/config/import_tree.go b/vendor/github.com/hashicorp/terraform/config/import_tree.go deleted file mode 100644 index 08cbc773..00000000 --- a/vendor/github.com/hashicorp/terraform/config/import_tree.go +++ /dev/null @@ -1,151 +0,0 @@ -package config - -import ( - "bufio" - "fmt" - "io" - "os" - - "github.com/hashicorp/errwrap" -) - -// configurable is an interface that must be implemented by any configuration -// formats of Terraform in order to return a *Config. -type configurable interface { - Config() (*Config, error) -} - -// importTree is the result of the first-pass load of the configuration -// files. It is a tree of raw configurables and then any children (their -// imports). -// -// An importTree can be turned into a configTree. -type importTree struct { - Path string - Raw configurable - Children []*importTree -} - -// This is the function type that must be implemented by the configuration -// file loader to turn a single file into a configurable and any additional -// imports. -type fileLoaderFunc func(path string) (configurable, []string, error) - -// Set this to a non-empty value at link time to enable the HCL2 experiment. -// This is not currently enabled for release builds. 
-// -// For example: -// go install -ldflags="-X github.com/hashicorp/terraform/config.enableHCL2Experiment=true" github.com/hashicorp/terraform -var enableHCL2Experiment = "" - -// loadTree takes a single file and loads the entire importTree for that -// file. This function detects what kind of configuration file it is and -// executes the proper fileLoaderFunc. -func loadTree(root string) (*importTree, error) { - var f fileLoaderFunc - - // HCL2 experiment is currently activated at build time via the linker. - // See the comment on this variable for more information. - if enableHCL2Experiment == "" { - // Main-line behavior: always use the original HCL parser - switch ext(root) { - case ".tf", ".tf.json": - f = loadFileHcl - default: - } - } else { - // Experimental behavior: use the HCL2 parser if the opt-in comment - // is present. - switch ext(root) { - case ".tf": - // We need to sniff the file for the opt-in comment line to decide - // if the file is participating in the HCL2 experiment. - cf, err := os.Open(root) - if err != nil { - return nil, err - } - sc := bufio.NewScanner(cf) - for sc.Scan() { - if sc.Text() == "#terraform:hcl2" { - f = globalHCL2Loader.loadFile - } - } - if f == nil { - f = loadFileHcl - } - case ".tf.json": - f = loadFileHcl - default: - } - } - - if f == nil { - return nil, fmt.Errorf( - "%s: unknown configuration format. Use '.tf' or '.tf.json' extension", - root) - } - - c, imps, err := f(root) - if err != nil { - return nil, err - } - - children := make([]*importTree, len(imps)) - for i, imp := range imps { - t, err := loadTree(imp) - if err != nil { - return nil, err - } - - children[i] = t - } - - return &importTree{ - Path: root, - Raw: c, - Children: children, - }, nil -} - -// Close releases any resources we might be holding open for the importTree. -// -// This can safely be called even while ConfigTree results are alive. The -// importTree is not bound to these. -func (t *importTree) Close() error { - if c, ok := t.Raw.(io.Closer); ok { - c.Close() - } - for _, ct := range t.Children { - ct.Close() - } - - return nil -} - -// ConfigTree traverses the importTree and turns each node into a *Config -// object, ultimately returning a *configTree. -func (t *importTree) ConfigTree() (*configTree, error) { - config, err := t.Raw.Config() - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error loading %s: {{err}}", t.Path), err) - } - - // Build our result - result := &configTree{ - Path: t.Path, - Config: config, - } - - // Build the config trees for the children - result.Children = make([]*configTree, len(t.Children)) - for i, ct := range t.Children { - t, err := ct.ConfigTree() - if err != nil { - return nil, err - } - - result.Children[i] = t - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate.go b/vendor/github.com/hashicorp/terraform/config/interpolate.go deleted file mode 100644 index 4cc9e975..00000000 --- a/vendor/github.com/hashicorp/terraform/config/interpolate.go +++ /dev/null @@ -1,435 +0,0 @@ -package config - -import ( - "fmt" - "strconv" - "strings" - - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/hil/ast" -) - -// An InterpolatedVariable is a variable reference within an interpolation. -// -// Implementations of this interface represent various sources where -// variables can come from: user variables, resources, etc. 
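The concrete implementations that follow are all distinguished by the key's leading segment; NewInterpolatedVariable later in this file dispatches on exactly these prefixes. A condensed sketch of that dispatch order (the helper name is illustrative):

package main

import (
	"fmt"
	"strings"
)

// kindOf mirrors NewInterpolatedVariable's prefix dispatch: the key's first
// dot-segment selects the variable kind, with resource as the fallback.
func kindOf(key string) string {
	switch {
	case strings.HasPrefix(key, "count."):
		return "count"
	case strings.HasPrefix(key, "path."):
		return "path"
	case strings.HasPrefix(key, "self."):
		return "self"
	case strings.HasPrefix(key, "terraform."):
		return "terraform"
	case strings.HasPrefix(key, "var."):
		return "user"
	case strings.HasPrefix(key, "local."):
		return "local"
	case strings.HasPrefix(key, "module."):
		return "module"
	case !strings.ContainsRune(key, '.'):
		return "simple"
	default:
		return "resource"
	}
}

func main() {
	for _, k := range []string{"var.region", "count.index", "aws_instance.web.id", "raw"} {
		fmt.Println(k, "->", kindOf(k))
	}
}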
-type InterpolatedVariable interface { - FullKey() string - SourceRange() tfdiags.SourceRange -} - -// varRange can be embedded into an InterpolatedVariable implementation to -// implement the SourceRange method. -type varRange struct { - rng tfdiags.SourceRange -} - -func (r varRange) SourceRange() tfdiags.SourceRange { - return r.rng -} - -// CountVariable is a variable for referencing information about -// the count. -type CountVariable struct { - Type CountValueType - key string - varRange -} - -// CountValueType is the type of the count variable that is referenced. -type CountValueType byte - -const ( - CountValueInvalid CountValueType = iota - CountValueIndex -) - -// A ModuleVariable is a variable that is referencing the output -// of a module, such as "${module.foo.bar}" -type ModuleVariable struct { - Name string - Field string - key string - varRange -} - -// A PathVariable is a variable that references path information about the -// module. -type PathVariable struct { - Type PathValueType - key string - varRange -} - -type PathValueType byte - -const ( - PathValueInvalid PathValueType = iota - PathValueCwd - PathValueModule - PathValueRoot -) - -// A ResourceVariable is a variable that is referencing the field -// of a resource, such as "${aws_instance.foo.ami}" -type ResourceVariable struct { - Mode ResourceMode - Type string // Resource type, i.e. "aws_instance" - Name string // Resource name - Field string // Resource field - - Multi bool // True if multi-variable: aws_instance.foo.*.id - Index int // Index for multi-variable: aws_instance.foo.1.id == 1 - - key string - varRange -} - -// SelfVariable is a variable that is referencing the same resource -// it is running on: "${self.address}" -type SelfVariable struct { - Field string - - key string - varRange -} - -// SimpleVariable is an unprefixed variable, which can show up when users have -// strings they are passing down to resources that use interpolation -// internally. The template_file resource is an example of this. -type SimpleVariable struct { - Key string - varRange -} - -// TerraformVariable is a "terraform."-prefixed variable used to access -// metadata about the Terraform run. -type TerraformVariable struct { - Field string - key string - varRange -} - -// A UserVariable is a variable that is referencing a user variable -// that is inputted from outside the configuration. This looks like -// "${var.foo}" -type UserVariable struct { - Name string - Elem string - - key string - varRange -} - -// A LocalVariable is a variable that references a local value defined within -// the current module, via a "locals" block. This looks like "${local.foo}". 
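The Multi and Index fields documented above are recovered from the raw key: in TYPE.NAME.FIELD, a leading "*" or integer segment of FIELD marks a multi-variable. A standalone sketch mirroring the parsing in NewResourceVariable (the function name is illustrative):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseField mirrors NewResourceVariable's handling of the FIELD part:
// "*" yields the bare splat form (index -1), an integer yields an indexed
// reference, anything else is a plain attribute.
func parseField(field string) (multi bool, index int, rest string) {
	rest = field
	if idx := strings.Index(field, "."); idx != -1 {
		indexStr := field[:idx]
		multi = indexStr == "*"
		index = -1
		if !multi {
			if n, err := strconv.ParseInt(indexStr, 0, 0); err == nil {
				multi = true
				index = int(n)
			}
		}
		if multi {
			rest = field[idx+1:]
		}
	}
	return multi, index, rest
}

func main() {
	for _, f := range []string{"*.id", "1.id", "ami", "tags.name"} {
		m, i, rest := parseField(f)
		fmt.Printf("%-10s multi=%-5v index=%-2d field=%s\n", f, m, i, rest)
	}
}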
-type LocalVariable struct { - Name string - varRange -} - -func NewInterpolatedVariable(v string) (InterpolatedVariable, error) { - if strings.HasPrefix(v, "count.") { - return NewCountVariable(v) - } else if strings.HasPrefix(v, "path.") { - return NewPathVariable(v) - } else if strings.HasPrefix(v, "self.") { - return NewSelfVariable(v) - } else if strings.HasPrefix(v, "terraform.") { - return NewTerraformVariable(v) - } else if strings.HasPrefix(v, "var.") { - return NewUserVariable(v) - } else if strings.HasPrefix(v, "local.") { - return NewLocalVariable(v) - } else if strings.HasPrefix(v, "module.") { - return NewModuleVariable(v) - } else if !strings.ContainsRune(v, '.') { - return NewSimpleVariable(v) - } else { - return NewResourceVariable(v) - } -} - -func NewCountVariable(key string) (*CountVariable, error) { - var fieldType CountValueType - parts := strings.SplitN(key, ".", 2) - switch parts[1] { - case "index": - fieldType = CountValueIndex - } - - return &CountVariable{ - Type: fieldType, - key: key, - }, nil -} - -func (c *CountVariable) FullKey() string { - return c.key -} - -func NewModuleVariable(key string) (*ModuleVariable, error) { - parts := strings.SplitN(key, ".", 3) - if len(parts) < 3 { - return nil, fmt.Errorf( - "%s: module variables must be three parts: module.name.attr", - key) - } - - return &ModuleVariable{ - Name: parts[1], - Field: parts[2], - key: key, - }, nil -} - -func (v *ModuleVariable) FullKey() string { - return v.key -} - -func (v *ModuleVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -func NewPathVariable(key string) (*PathVariable, error) { - var fieldType PathValueType - parts := strings.SplitN(key, ".", 2) - switch parts[1] { - case "cwd": - fieldType = PathValueCwd - case "module": - fieldType = PathValueModule - case "root": - fieldType = PathValueRoot - } - - return &PathVariable{ - Type: fieldType, - key: key, - }, nil -} - -func (v *PathVariable) FullKey() string { - return v.key -} - -func NewResourceVariable(key string) (*ResourceVariable, error) { - var mode ResourceMode - var parts []string - if strings.HasPrefix(key, "data.") { - mode = DataResourceMode - parts = strings.SplitN(key, ".", 4) - if len(parts) < 4 { - return nil, fmt.Errorf( - "%s: data variables must be four parts: data.TYPE.NAME.ATTR", - key) - } - - // Don't actually need the "data." prefix for parsing, since it's - // always constant. 
- parts = parts[1:] - } else { - mode = ManagedResourceMode - parts = strings.SplitN(key, ".", 3) - if len(parts) < 3 { - return nil, fmt.Errorf( - "%s: resource variables must be three parts: TYPE.NAME.ATTR", - key) - } - } - - field := parts[2] - multi := false - var index int - - if idx := strings.Index(field, "."); idx != -1 { - indexStr := field[:idx] - multi = indexStr == "*" - index = -1 - - if !multi { - indexInt, err := strconv.ParseInt(indexStr, 0, 0) - if err == nil { - multi = true - index = int(indexInt) - } - } - - if multi { - field = field[idx+1:] - } - } - - return &ResourceVariable{ - Mode: mode, - Type: parts[0], - Name: parts[1], - Field: field, - Multi: multi, - Index: index, - key: key, - }, nil -} - -func (v *ResourceVariable) ResourceId() string { - switch v.Mode { - case ManagedResourceMode: - return fmt.Sprintf("%s.%s", v.Type, v.Name) - case DataResourceMode: - return fmt.Sprintf("data.%s.%s", v.Type, v.Name) - default: - panic(fmt.Errorf("unknown resource mode %s", v.Mode)) - } -} - -func (v *ResourceVariable) FullKey() string { - return v.key -} - -func NewSelfVariable(key string) (*SelfVariable, error) { - field := key[len("self."):] - - return &SelfVariable{ - Field: field, - - key: key, - }, nil -} - -func (v *SelfVariable) FullKey() string { - return v.key -} - -func (v *SelfVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -func NewSimpleVariable(key string) (*SimpleVariable, error) { - return &SimpleVariable{Key: key}, nil -} - -func (v *SimpleVariable) FullKey() string { - return v.Key -} - -func (v *SimpleVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -func NewTerraformVariable(key string) (*TerraformVariable, error) { - field := key[len("terraform."):] - return &TerraformVariable{ - Field: field, - key: key, - }, nil -} - -func (v *TerraformVariable) FullKey() string { - return v.key -} - -func (v *TerraformVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -func NewUserVariable(key string) (*UserVariable, error) { - name := key[len("var."):] - elem := "" - if idx := strings.Index(name, "."); idx > -1 { - elem = name[idx+1:] - name = name[:idx] - } - - if len(elem) > 0 { - return nil, fmt.Errorf("Invalid dot index found: 'var.%s.%s'. Values in maps and lists can be referenced using square bracket indexing, like: 'var.mymap[\"key\"]' or 'var.mylist[1]'.", name, elem) - } - - return &UserVariable{ - key: key, - - Name: name, - Elem: elem, - }, nil -} - -func (v *UserVariable) FullKey() string { - return v.key -} - -func (v *UserVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -func NewLocalVariable(key string) (*LocalVariable, error) { - name := key[len("local."):] - if idx := strings.Index(name, "."); idx > -1 { - return nil, fmt.Errorf("Can't use dot (.) attribute access in local.%s; use square bracket indexing", name) - } - - return &LocalVariable{ - Name: name, - }, nil -} - -func (v *LocalVariable) FullKey() string { - return fmt.Sprintf("local.%s", v.Name) -} - -func (v *LocalVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -// DetectVariables takes an AST root and returns all the interpolated -// variables that are detected in the AST tree. 
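DetectVariables below builds on hil's AST visitor. Assuming the hil version vendored alongside this package, the same pieces can be exercised directly: parse a string, then collect every variable access the visitor reports.

package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	root, err := hil.Parse(`hello ${var.name}, id ${aws_instance.web.id}`)
	if err != nil {
		panic(err)
	}

	// Accept walks the AST, handing each node to the callback.
	var found []string
	root.Accept(func(n ast.Node) ast.Node {
		if va, ok := n.(*ast.VariableAccess); ok {
			found = append(found, va.Name)
		}
		return n
	})

	fmt.Println(found) // the two variable keys, e.g. [var.name aws_instance.web.id]
}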
-func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) { - var result []InterpolatedVariable - var resultErr error - - // Visitor callback - fn := func(n ast.Node) ast.Node { - if resultErr != nil { - return n - } - - switch vn := n.(type) { - case *ast.VariableAccess: - v, err := NewInterpolatedVariable(vn.Name) - if err != nil { - resultErr = err - return n - } - result = append(result, v) - case *ast.Index: - if va, ok := vn.Target.(*ast.VariableAccess); ok { - v, err := NewInterpolatedVariable(va.Name) - if err != nil { - resultErr = err - return n - } - result = append(result, v) - } - if va, ok := vn.Key.(*ast.VariableAccess); ok { - v, err := NewInterpolatedVariable(va.Name) - if err != nil { - resultErr = err - return n - } - result = append(result, v) - } - default: - return n - } - - return n - } - - // Visitor pattern - root.Accept(fn) - - if resultErr != nil { - return nil, resultErr - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go deleted file mode 100644 index f152d800..00000000 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go +++ /dev/null @@ -1,282 +0,0 @@ -package config - -import ( - "fmt" - "reflect" - "strings" - - "github.com/hashicorp/hil" - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/mitchellh/reflectwalk" -) - -// interpolationWalker implements interfaces for the reflectwalk package -// (github.com/mitchellh/reflectwalk) that can be used to automatically -// execute a callback for an interpolation. -type interpolationWalker struct { - // F is the function to call for every interpolation. It can be nil. - // - // If Replace is true, then the return value of F will be used to - // replace the interpolation. - F interpolationWalkerFunc - Replace bool - - // ContextF is an advanced version of F that also receives the - // location of where it is in the structure. This lets you do - // context-aware validation. - ContextF interpolationWalkerContextFunc - - key []string - lastValue reflect.Value - loc reflectwalk.Location - cs []reflect.Value - csKey []reflect.Value - csData interface{} - sliceIndex []int - unknownKeys []string -} - -// interpolationWalkerFunc is the callback called by interpolationWalk. -// It is called with any interpolation found. It should return a value -// to replace the interpolation with, along with any errors. -// -// If Replace is set to false in interpolationWalker, then the replace -// value can be anything as it will have no effect. -type interpolationWalkerFunc func(ast.Node) (interface{}, error) - -// interpolationWalkerContextFunc is called by interpolationWalk if -// ContextF is set. This receives both the interpolation and the location -// where the interpolation is. -// -// This callback can be used to validate the location of the interpolation -// within the configuration. 
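The walker below plugs into github.com/mitchellh/reflectwalk, which dispatches on whichever walker interfaces the value you pass in implements. A minimal walker implementing only Primitive, shown under the assumption that the vendored reflectwalk behaves like the upstream package:

package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// stringCounter implements only the PrimitiveWalker interface; reflectwalk
// calls Primitive for each leaf value it encounters during the walk.
type stringCounter struct{ n int }

func (w *stringCounter) Primitive(v reflect.Value) error {
	if v.Kind() == reflect.Interface {
		v = v.Elem()
	}
	if v.Kind() == reflect.String {
		w.n++
	}
	return nil
}

func main() {
	data := map[string]interface{}{
		"name": "web",
		"tags": []interface{}{"a", "b"},
	}
	w := &stringCounter{}
	if err := reflectwalk.Walk(data, w); err != nil {
		panic(err)
	}
	fmt.Println("string values visited:", w.n)
}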
-type interpolationWalkerContextFunc func(reflectwalk.Location, ast.Node) - -func (w *interpolationWalker) Enter(loc reflectwalk.Location) error { - w.loc = loc - return nil -} - -func (w *interpolationWalker) Exit(loc reflectwalk.Location) error { - w.loc = reflectwalk.None - - switch loc { - case reflectwalk.Map: - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.MapValue: - w.key = w.key[:len(w.key)-1] - w.csKey = w.csKey[:len(w.csKey)-1] - case reflectwalk.Slice: - // Split any values that need to be split - w.splitSlice() - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.SliceElem: - w.csKey = w.csKey[:len(w.csKey)-1] - w.sliceIndex = w.sliceIndex[:len(w.sliceIndex)-1] - } - - return nil -} - -func (w *interpolationWalker) Map(m reflect.Value) error { - w.cs = append(w.cs, m) - return nil -} - -func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error { - w.csData = k - w.csKey = append(w.csKey, k) - - if l := len(w.sliceIndex); l > 0 { - w.key = append(w.key, fmt.Sprintf("%d.%s", w.sliceIndex[l-1], k.String())) - } else { - w.key = append(w.key, k.String()) - } - - w.lastValue = v - return nil -} - -func (w *interpolationWalker) Slice(s reflect.Value) error { - w.cs = append(w.cs, s) - return nil -} - -func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error { - w.csKey = append(w.csKey, reflect.ValueOf(i)) - w.sliceIndex = append(w.sliceIndex, i) - return nil -} - -func (w *interpolationWalker) Primitive(v reflect.Value) error { - setV := v - - // We only care about strings - if v.Kind() == reflect.Interface { - setV = v - v = v.Elem() - } - if v.Kind() != reflect.String { - return nil - } - - astRoot, err := hil.Parse(v.String()) - if err != nil { - return err - } - - // If the AST we got is just a literal string value with the same - // value then we ignore it. We have to check if its the same value - // because it is possible to input a string, get out a string, and - // have it be different. For example: "foo-$${bar}" turns into - // "foo-${bar}" - if n, ok := astRoot.(*ast.LiteralNode); ok { - if s, ok := n.Value.(string); ok && s == v.String() { - return nil - } - } - - if w.ContextF != nil { - w.ContextF(w.loc, astRoot) - } - - if w.F == nil { - return nil - } - - replaceVal, err := w.F(astRoot) - if err != nil { - return fmt.Errorf( - "%s in:\n\n%s", - err, v.String()) - } - - if w.Replace { - // We need to determine if we need to remove this element - // if the result contains any "UnknownVariableValue" which is - // set if it is computed. This behavior is different if we're - // splitting (in a SliceElem) or not. - remove := false - if w.loc == reflectwalk.SliceElem { - switch typedReplaceVal := replaceVal.(type) { - case string: - if typedReplaceVal == hcl2shim.UnknownVariableValue { - remove = true - } - case []interface{}: - if hasUnknownValue(typedReplaceVal) { - remove = true - } - } - } else if replaceVal == hcl2shim.UnknownVariableValue { - remove = true - } - - if remove { - w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, ".")) - } - - resultVal := reflect.ValueOf(replaceVal) - switch w.loc { - case reflectwalk.MapKey: - m := w.cs[len(w.cs)-1] - - // Delete the old value - var zero reflect.Value - m.SetMapIndex(w.csData.(reflect.Value), zero) - - // Set the new key with the existing value - m.SetMapIndex(resultVal, w.lastValue) - - // Set the key to be the new key - w.csData = resultVal - case reflectwalk.MapValue: - // If we're in a map, then the only way to set a map value is - // to set it directly. 
- m := w.cs[len(w.cs)-1] - mk := w.csData.(reflect.Value) - m.SetMapIndex(mk, resultVal) - default: - // Otherwise, we should be addressable - setV.Set(resultVal) - } - } - - return nil -} - -func (w *interpolationWalker) replaceCurrent(v reflect.Value) { - // if we don't have at least 2 values, we're not going to find a map, but - // we could panic. - if len(w.cs) < 2 { - return - } - - c := w.cs[len(w.cs)-2] - switch c.Kind() { - case reflect.Map: - // Get the key and delete it - k := w.csKey[len(w.csKey)-1] - c.SetMapIndex(k, v) - } -} - -func hasUnknownValue(variable []interface{}) bool { - for _, value := range variable { - if strVal, ok := value.(string); ok { - if strVal == hcl2shim.UnknownVariableValue { - return true - } - } - } - return false -} - -func (w *interpolationWalker) splitSlice() { - raw := w.cs[len(w.cs)-1] - - var s []interface{} - switch v := raw.Interface().(type) { - case []interface{}: - s = v - case []map[string]interface{}: - return - } - - split := false - for _, val := range s { - if varVal, ok := val.(ast.Variable); ok && varVal.Type == ast.TypeList { - split = true - } - if _, ok := val.([]interface{}); ok { - split = true - } - } - - if !split { - return - } - - result := make([]interface{}, 0) - for _, v := range s { - switch val := v.(type) { - case ast.Variable: - switch val.Type { - case ast.TypeList: - elements := val.Value.([]ast.Variable) - for _, element := range elements { - result = append(result, element.Value) - } - default: - result = append(result, val.Value) - } - case []interface{}: - result = append(result, val...) - default: - result = append(result, v) - } - } - - w.replaceCurrent(reflect.ValueOf(result)) -} diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go deleted file mode 100644 index 612e25b9..00000000 --- a/vendor/github.com/hashicorp/terraform/config/loader.go +++ /dev/null @@ -1,212 +0,0 @@ -package config - -import ( - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - - "github.com/hashicorp/hcl" -) - -// ErrNoConfigsFound is the error returned by LoadDir if no -// Terraform configuration files were found in the given directory. -type ErrNoConfigsFound struct { - Dir string -} - -func (e ErrNoConfigsFound) Error() string { - return fmt.Sprintf( - "No Terraform configuration files found in directory: %s", - e.Dir) -} - -// LoadJSON loads a single Terraform configuration from a given JSON document. -// -// The document must be a complete Terraform configuration. This function will -// NOT try to load any additional modules so only the given document is loaded. -func LoadJSON(raw json.RawMessage) (*Config, error) { - obj, err := hcl.Parse(string(raw)) - if err != nil { - return nil, fmt.Errorf( - "Error parsing JSON document as HCL: %s", err) - } - - // Start building the result - hclConfig := &hclConfigurable{ - Root: obj, - } - - return hclConfig.Config() -} - -// LoadFile loads the Terraform configuration from a given file. -// -// This file can be any format that Terraform recognizes, and import any -// other format that Terraform recognizes. -func LoadFile(path string) (*Config, error) { - importTree, err := loadTree(path) - if err != nil { - return nil, err - } - - configTree, err := importTree.ConfigTree() - - // Close the importTree now so that we can clear resources as quickly - // as possible. 
- importTree.Close() - - if err != nil { - return nil, err - } - - return configTree.Flatten() -} - -// LoadDir loads all the Terraform configuration files in a single -// directory and appends them together. -// -// Special files known as "override files" can also be present, which -// are merged into the loaded configuration. That is, the non-override -// files are loaded first to create the configuration. Then, the overrides -// are merged into the configuration to create the final configuration. -// -// Files are loaded in lexical order. -func LoadDir(root string) (*Config, error) { - files, overrides, err := dirFiles(root) - if err != nil { - return nil, err - } - if len(files) == 0 && len(overrides) == 0 { - return nil, &ErrNoConfigsFound{Dir: root} - } - - // Determine the absolute path to the directory. - rootAbs, err := filepath.Abs(root) - if err != nil { - return nil, err - } - - var result *Config - - // Sort the files and overrides so we have a deterministic order - sort.Strings(files) - sort.Strings(overrides) - - // Load all the regular files, append them to each other. - for _, f := range files { - c, err := LoadFile(f) - if err != nil { - return nil, err - } - - if result != nil { - result, err = Append(result, c) - if err != nil { - return nil, err - } - } else { - result = c - } - } - if len(files) == 0 { - result = &Config{} - } - - // Load all the overrides, and merge them into the config - for _, f := range overrides { - c, err := LoadFile(f) - if err != nil { - return nil, err - } - - result, err = Merge(result, c) - if err != nil { - return nil, err - } - } - - // Mark the directory - result.Dir = rootAbs - - return result, nil -} - -// Ext returns the Terraform configuration extension of the given -// path, or a blank string if it is an invalid function. -func ext(path string) string { - if strings.HasSuffix(path, ".tf") { - return ".tf" - } else if strings.HasSuffix(path, ".tf.json") { - return ".tf.json" - } else { - return "" - } -} - -func dirFiles(dir string) ([]string, []string, error) { - f, err := os.Open(dir) - if err != nil { - return nil, nil, err - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - return nil, nil, err - } - if !fi.IsDir() { - return nil, nil, fmt.Errorf( - "configuration path must be a directory: %s", - dir) - } - - var files, overrides []string - err = nil - for err != io.EOF { - var fis []os.FileInfo - fis, err = f.Readdir(128) - if err != nil && err != io.EOF { - return nil, nil, err - } - - for _, fi := range fis { - // Ignore directories - if fi.IsDir() { - continue - } - - // Only care about files that are valid to load - name := fi.Name() - extValue := ext(name) - if extValue == "" || IsIgnoredFile(name) { - continue - } - - // Determine if we're dealing with an override - nameNoExt := name[:len(name)-len(extValue)] - override := nameNoExt == "override" || - strings.HasSuffix(nameNoExt, "_override") - - path := filepath.Join(dir, name) - if override { - overrides = append(overrides, path) - } else { - files = append(files, path) - } - } - } - - return files, overrides, nil -} - -// IsIgnoredFile returns true or false depending on whether the -// provided file name is a file that should be ignored. 
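// For reference, how the classification helpers in this file combine (a
// sketch using the ext and IsIgnoredFile functions defined here):
//
//	ext("main.tf")            // ".tf"
//	ext("main.tf.json")       // ".tf.json"
//	ext("notes.txt")          // ""   -- skipped by dirFiles
//	IsIgnoredFile(".main.tf") // true -- Unix-like hidden file
//	IsIgnoredFile("main.tf~") // true -- vim backup file
//
// A file named "override.tf" or "*_override.tf" is collected separately by
// dirFiles and merged in only after all regular files have been appended.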
-func IsIgnoredFile(name string) bool { - return strings.HasPrefix(name, ".") || // Unix-like hidden files - strings.HasSuffix(name, "~") || // vim - strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs -} diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go deleted file mode 100644 index 68cffe2c..00000000 --- a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go +++ /dev/null @@ -1,1270 +0,0 @@ -package config - -import ( - "fmt" - "io/ioutil" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/ast" - "github.com/mitchellh/mapstructure" -) - -// hclConfigurable is an implementation of configurable that knows -// how to turn HCL configuration into a *Config object. -type hclConfigurable struct { - File string - Root *ast.File -} - -var ReservedDataSourceFields = []string{ - "connection", - "count", - "depends_on", - "lifecycle", - "provider", - "provisioner", -} - -var ReservedResourceFields = []string{ - "connection", - "count", - "depends_on", - "id", - "lifecycle", - "provider", - "provisioner", -} - -var ReservedProviderFields = []string{ - "alias", - "version", -} - -func (t *hclConfigurable) Config() (*Config, error) { - validKeys := map[string]struct{}{ - "atlas": struct{}{}, - "data": struct{}{}, - "locals": struct{}{}, - "module": struct{}{}, - "output": struct{}{}, - "provider": struct{}{}, - "resource": struct{}{}, - "terraform": struct{}{}, - "variable": struct{}{}, - } - - // Top-level item should be the object list - list, ok := t.Root.Node.(*ast.ObjectList) - if !ok { - return nil, fmt.Errorf("error parsing: file doesn't contain a root object") - } - - // Start building up the actual configuration. 
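// The block loaders below all follow the same pattern: hcl.Parse produces
// an *ast.File whose root node is an *ast.ObjectList, and Filter selects
// the items whose first key matches a block type, stripping that key from
// the result. A standalone sketch, assuming the vendored
// github.com/hashicorp/hcl API:
//
//	f, err := hcl.Parse(`variable "region" { default = "us-east-1" }`)
//	if err == nil {
//		list := f.Node.(*ast.ObjectList)
//		vars := list.Filter("variable") // one item; the remaining key is "region"
//	}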
- config := new(Config) - - // Terraform config - if o := list.Filter("terraform"); len(o.Items) > 0 { - var err error - config.Terraform, err = loadTerraformHcl(o) - if err != nil { - return nil, err - } - } - - // Build the variables - if vars := list.Filter("variable"); len(vars.Items) > 0 { - var err error - config.Variables, err = loadVariablesHcl(vars) - if err != nil { - return nil, err - } - } - - // Build local values - if locals := list.Filter("locals"); len(locals.Items) > 0 { - var err error - config.Locals, err = loadLocalsHcl(locals) - if err != nil { - return nil, err - } - } - - // Get Atlas configuration - if atlas := list.Filter("atlas"); len(atlas.Items) > 0 { - var err error - config.Atlas, err = loadAtlasHcl(atlas) - if err != nil { - return nil, err - } - } - - // Build the modules - if modules := list.Filter("module"); len(modules.Items) > 0 { - var err error - config.Modules, err = loadModulesHcl(modules) - if err != nil { - return nil, err - } - } - - // Build the provider configs - if providers := list.Filter("provider"); len(providers.Items) > 0 { - var err error - config.ProviderConfigs, err = loadProvidersHcl(providers) - if err != nil { - return nil, err - } - } - - // Build the resources - { - var err error - managedResourceConfigs := list.Filter("resource") - dataResourceConfigs := list.Filter("data") - - config.Resources = make( - []*Resource, 0, - len(managedResourceConfigs.Items)+len(dataResourceConfigs.Items), - ) - - managedResources, err := loadManagedResourcesHcl(managedResourceConfigs) - if err != nil { - return nil, err - } - dataResources, err := loadDataResourcesHcl(dataResourceConfigs) - if err != nil { - return nil, err - } - - config.Resources = append(config.Resources, dataResources...) - config.Resources = append(config.Resources, managedResources...) - } - - // Build the outputs - if outputs := list.Filter("output"); len(outputs.Items) > 0 { - var err error - config.Outputs, err = loadOutputsHcl(outputs) - if err != nil { - return nil, err - } - } - - // Check for invalid keys - for _, item := range list.Items { - if len(item.Keys) == 0 { - // Not sure how this would happen, but let's avoid a panic - continue - } - - k := item.Keys[0].Token.Value().(string) - if _, ok := validKeys[k]; ok { - continue - } - - config.unknownKeys = append(config.unknownKeys, k) - } - - return config, nil -} - -// loadFileHcl is a fileLoaderFunc that knows how to read HCL -// files and turn them into hclConfigurables. -func loadFileHcl(root string) (configurable, []string, error) { - // Read the HCL file and prepare for parsing - d, err := ioutil.ReadFile(root) - if err != nil { - return nil, nil, fmt.Errorf( - "Error reading %s: %s", root, err) - } - - // Parse it - hclRoot, err := hcl.Parse(string(d)) - if err != nil { - return nil, nil, fmt.Errorf( - "Error parsing %s: %s", root, err) - } - - // Start building the result - result := &hclConfigurable{ - File: root, - Root: hclRoot, - } - - // Dive in, find the imports. This is disabled for now since - // imports were removed prior to Terraform 0.1. The code is - // remaining here commented for historical purposes. 
- /* - imports := obj.Get("import") - if imports == nil { - result.Object.Ref() - return result, nil, nil - } - - if imports.Type() != libucl.ObjectTypeString { - imports.Close() - - return nil, nil, fmt.Errorf( - "Error in %s: all 'import' declarations should be in the format\n"+ - "`import \"foo\"` (Got type %s)", - root, - imports.Type()) - } - - // Gather all the import paths - importPaths := make([]string, 0, imports.Len()) - iter := imports.Iterate(false) - for imp := iter.Next(); imp != nil; imp = iter.Next() { - path := imp.ToString() - if !filepath.IsAbs(path) { - // Relative paths are relative to the Terraform file itself - dir := filepath.Dir(root) - path = filepath.Join(dir, path) - } - - importPaths = append(importPaths, path) - imp.Close() - } - iter.Close() - imports.Close() - - result.Object.Ref() - */ - - return result, nil, nil -} - -// Given a handle to a HCL object, this transforms it into the Terraform config -func loadTerraformHcl(list *ast.ObjectList) (*Terraform, error) { - if len(list.Items) > 1 { - return nil, fmt.Errorf("only one 'terraform' block allowed per module") - } - - // Get our one item - item := list.Items[0] - - // This block should have an empty top level ObjectItem. If there are keys - // here, it's likely because we have a flattened JSON object, and we can - // lift this into a nested ObjectList to decode properly. - if len(item.Keys) > 0 { - item = &ast.ObjectItem{ - Val: &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{item}, - }, - }, - } - } - - // We need the item value as an ObjectList - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("terraform block: should be an object") - } - - // NOTE: We purposely don't validate unknown HCL keys here so that - // we can potentially read _future_ Terraform version config (to - // still be able to validate the required version). - // - // We should still keep track of unknown keys to validate later, but - // HCL doesn't currently support that. - - var config Terraform - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading terraform config: %s", - err) - } - - // If we have provisioners, then parse those out - if os := listVal.Filter("backend"); len(os.Items) > 0 { - var err error - config.Backend, err = loadTerraformBackendHcl(os) - if err != nil { - return nil, fmt.Errorf( - "Error reading backend config for terraform block: %s", - err) - } - } - - return &config, nil -} - -// Loads the Backend configuration from an object list. 
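// hcl.DecodeObject is the workhorse of the loaders below; the same call
// decodes an AST node into a string, a slice, or a map depending on the
// target type. Sketches of the two most common shapes used here:
//
//	var config map[string]interface{}
//	err := hcl.DecodeObject(&config, item.Val) // whole block body as a map
//
//	var source string
//	err = hcl.DecodeObject(&source, o.Items[0].Val) // single attribute value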
-func loadTerraformBackendHcl(list *ast.ObjectList) (*Backend, error) { - if len(list.Items) > 1 { - return nil, fmt.Errorf("only one 'backend' block allowed") - } - - // Get our one item - item := list.Items[0] - - // Verify the keys - if len(item.Keys) != 1 { - return nil, fmt.Errorf( - "position %s: 'backend' must be followed by exactly one string: a type", - item.Pos()) - } - - typ := item.Keys[0].Token.Value().(string) - - // Decode the raw config - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading backend config: %s", - err) - } - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading backend config: %s", - err) - } - - b := &Backend{ - Type: typ, - RawConfig: rawConfig, - } - b.Hash = b.Rehash() - - return b, nil -} - -// Given a handle to a HCL object, this transforms it into the Atlas -// configuration. -func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) { - if len(list.Items) > 1 { - return nil, fmt.Errorf("only one 'atlas' block allowed") - } - - // Get our one item - item := list.Items[0] - - var config AtlasConfig - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading atlas config: %s", - err) - } - - return &config, nil -} - -// Given a handle to a HCL object, this recurses into the structure -// and pulls out a list of modules. -// -// The resulting modules may not be unique, but each module -// represents exactly one module definition in the HCL configuration. -// We leave it up to another pass to merge them together. -func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) { - if err := assertAllBlocksHaveNames("module", list); err != nil { - return nil, err - } - - list = list.Children() - if len(list.Items) == 0 { - return nil, nil - } - - // Where all the results will go - var result []*Module - - // Now go over all the types and their children in order to get - // all of the actual resources. 
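// For illustration, a module block of the shape this loop expects (the
// names are hypothetical):
//
//	module "network" {
//	    source    = "./network"
//	    version   = ">= 1.0"
//	    providers = { "aws" = "aws.east" }
//	}
//
// "source", "version", and "providers" are decoded individually below and
// removed from the raw config map before it is wrapped in a RawConfig.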
- for _, item := range list.Items { - k := item.Keys[0].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("module '%s': should be an object", k) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading config for %s: %s", - k, - err) - } - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading config for %s: %s", - k, - err) - } - - // Remove the fields we handle specially - delete(config, "source") - delete(config, "version") - delete(config, "providers") - - var source string - if o := listVal.Filter("source"); len(o.Items) > 0 { - err = hcl.DecodeObject(&source, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error parsing source for %s: %s", - k, - err) - } - } - - var version string - if o := listVal.Filter("version"); len(o.Items) > 0 { - err = hcl.DecodeObject(&version, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error parsing version for %s: %s", - k, - err) - } - } - - var providers map[string]string - if o := listVal.Filter("providers"); len(o.Items) > 0 { - err = hcl.DecodeObject(&providers, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error parsing providers for %s: %s", - k, - err) - } - } - - result = append(result, &Module{ - Name: k, - Source: source, - Version: version, - Providers: providers, - RawConfig: rawConfig, - }) - } - - return result, nil -} - -// loadLocalsHcl recurses into the given HCL object turns it into -// a list of locals. -func loadLocalsHcl(list *ast.ObjectList) ([]*Local, error) { - - result := make([]*Local, 0, len(list.Items)) - - for _, block := range list.Items { - if len(block.Keys) > 0 { - return nil, fmt.Errorf( - "locals block at %s should not have label %q", - block.Pos(), block.Keys[0].Token.Value(), - ) - } - - blockObj, ok := block.Val.(*ast.ObjectType) - if !ok { - return nil, fmt.Errorf("locals value at %s should be a block", block.Val.Pos()) - } - - // blockObj now contains directly our local decls - for _, item := range blockObj.List.Items { - if len(item.Keys) != 1 { - return nil, fmt.Errorf("local declaration at %s may not be a block", item.Val.Pos()) - } - - // By the time we get here there can only be one item left, but - // we'll decode into a map anyway because it's a convenient way - // to extract both the key and the value robustly. - kv := map[string]interface{}{} - hcl.DecodeObject(&kv, item) - for k, v := range kv { - rawConfig, err := NewRawConfig(map[string]interface{}{ - "value": v, - }) - - if err != nil { - return nil, fmt.Errorf( - "error parsing local value %q at %s: %s", - k, item.Val.Pos(), err, - ) - } - - result = append(result, &Local{ - Name: k, - RawConfig: rawConfig, - }) - } - } - } - - return result, nil -} - -// LoadOutputsHcl recurses into the given HCL object and turns -// it into a mapping of outputs. -func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { - if err := assertAllBlocksHaveNames("output", list); err != nil { - return nil, err - } - - list = list.Children() - - // Go through each object and turn it into an actual result. 
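// For illustration, an output block of the shape handled below (the names
// are hypothetical); after Filter and Children, the single remaining key
// on each item is the output name:
//
//	output "address" {
//	    value       = "${aws_instance.web.public_ip}"
//	    depends_on  = ["aws_instance.web"]
//	    description = "Public address of the web server"
//	}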
- result := make([]*Output, 0, len(list.Items)) - for _, item := range list.Items { - n := item.Keys[0].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("output '%s': should be an object", n) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, err - } - - // Delete special keys - delete(config, "depends_on") - delete(config, "description") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading config for output %s: %s", - n, - err) - } - - // If we have depends fields, then add those in - var dependsOn []string - if o := listVal.Filter("depends_on"); len(o.Items) > 0 { - err := hcl.DecodeObject(&dependsOn, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading depends_on for output %q: %s", - n, - err) - } - } - - // If we have a description field, then filter that - var description string - if o := listVal.Filter("description"); len(o.Items) > 0 { - err := hcl.DecodeObject(&description, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading description for output %q: %s", - n, - err) - } - } - - result = append(result, &Output{ - Name: n, - RawConfig: rawConfig, - DependsOn: dependsOn, - Description: description, - }) - } - - return result, nil -} - -// LoadVariablesHcl recurses into the given HCL object and turns -// it into a list of variables. -func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) { - if err := assertAllBlocksHaveNames("variable", list); err != nil { - return nil, err - } - - list = list.Children() - - // hclVariable is the structure each variable is decoded into - type hclVariable struct { - DeclaredType string `hcl:"type"` - Default interface{} - Description string - Fields []string `hcl:",decodedFields"` - } - - // Go through each object and turn it into an actual result. - result := make([]*Variable, 0, len(list.Items)) - for _, item := range list.Items { - // Clean up items from JSON - unwrapHCLObjectKeysFromJSON(item, 1) - - // Verify the keys - if len(item.Keys) != 1 { - return nil, fmt.Errorf( - "position %s: 'variable' must be followed by exactly one strings: a name", - item.Pos()) - } - - n := item.Keys[0].Token.Value().(string) - if !NameRegexp.MatchString(n) { - return nil, fmt.Errorf( - "position %s: 'variable' name must match regular expression: %s", - item.Pos(), NameRegexp) - } - - // Check for invalid keys - valid := []string{"type", "default", "description"} - if err := checkHCLKeys(item.Val, valid); err != nil { - return nil, multierror.Prefix(err, fmt.Sprintf( - "variable[%s]:", n)) - } - - // Decode into hclVariable to get typed values - var hclVar hclVariable - if err := hcl.DecodeObject(&hclVar, item.Val); err != nil { - return nil, err - } - - // Defaults turn into a slice of map[string]interface{} and - // we need to make sure to convert that down into the - // proper type for Config. 
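// A worked example of the normalization below: a default written as an
// object arrives from the decoder as a slice of maps,
//
//	[]map[string]interface{}{{"a": "x"}, {"b": "y"}}
//
// and is folded into the single map the rest of the config layer expects:
//
//	map[string]interface{}{"a": "x", "b": "y"}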
- if ms, ok := hclVar.Default.([]map[string]interface{}); ok { - def := make(map[string]interface{}) - for _, m := range ms { - for k, v := range m { - def[k] = v - } - } - - hclVar.Default = def - } - - // Build the new variable and do some basic validation - newVar := &Variable{ - Name: n, - DeclaredType: hclVar.DeclaredType, - Default: hclVar.Default, - Description: hclVar.Description, - } - if err := newVar.ValidateTypeAndDefault(); err != nil { - return nil, err - } - - result = append(result, newVar) - } - - return result, nil -} - -// LoadProvidersHcl recurses into the given HCL object and turns -// it into a mapping of provider configs. -func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) { - if err := assertAllBlocksHaveNames("provider", list); err != nil { - return nil, err - } - - list = list.Children() - if len(list.Items) == 0 { - return nil, nil - } - - // Go through each object and turn it into an actual result. - result := make([]*ProviderConfig, 0, len(list.Items)) - for _, item := range list.Items { - n := item.Keys[0].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("module '%s': should be an object", n) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, err - } - - delete(config, "alias") - delete(config, "version") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading config for provider config %s: %s", - n, - err) - } - - // If we have an alias field, then add those in - var alias string - if a := listVal.Filter("alias"); len(a.Items) > 0 { - err := hcl.DecodeObject(&alias, a.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading alias for provider[%s]: %s", - n, - err) - } - } - - // If we have a version field then extract it - var version string - if a := listVal.Filter("version"); len(a.Items) > 0 { - err := hcl.DecodeObject(&version, a.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading version for provider[%s]: %s", - n, - err) - } - } - - result = append(result, &ProviderConfig{ - Name: n, - Alias: alias, - Version: version, - RawConfig: rawConfig, - }) - } - - return result, nil -} - -// Given a handle to a HCL object, this recurses into the structure -// and pulls out a list of data sources. -// -// The resulting data sources may not be unique, but each one -// represents exactly one data definition in the HCL configuration. -// We leave it up to another pass to merge them together. -func loadDataResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { - if err := assertAllBlocksHaveNames("data", list); err != nil { - return nil, err - } - - list = list.Children() - if len(list.Items) == 0 { - return nil, nil - } - - // Where all the results will go - var result []*Resource - - // Now go over all the types and their children in order to get - // all of the actual resources. 
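// Each "data" item carries exactly two keys, the source type and the local
// name. For example (hypothetical names):
//
//	data "aws_ami" "web" {
//	    most_recent = true
//	}
//
// decodes with t == "aws_ami" and k == "web" in the loop below.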
- for _, item := range list.Items { - if len(item.Keys) != 2 { - return nil, fmt.Errorf( - "position %s: 'data' must be followed by exactly two strings: a type and a name", - item.Pos()) - } - - t := item.Keys[0].Token.Value().(string) - k := item.Keys[1].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("data sources %s[%s]: should be an object", t, k) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading config for %s[%s]: %s", - t, - k, - err) - } - - // Remove the fields we handle specially - delete(config, "depends_on") - delete(config, "provider") - delete(config, "count") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading config for %s[%s]: %s", - t, - k, - err) - } - - // If we have a count, then figure it out - var count string = "1" - if o := listVal.Filter("count"); len(o.Items) > 0 { - err = hcl.DecodeObject(&count, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error parsing count for %s[%s]: %s", - t, - k, - err) - } - } - countConfig, err := NewRawConfig(map[string]interface{}{ - "count": count, - }) - if err != nil { - return nil, err - } - countConfig.Key = "count" - - // If we have depends fields, then add those in - var dependsOn []string - if o := listVal.Filter("depends_on"); len(o.Items) > 0 { - err := hcl.DecodeObject(&dependsOn, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading depends_on for %s[%s]: %s", - t, - k, - err) - } - } - - // If we have a provider, then parse it out - var provider string - if o := listVal.Filter("provider"); len(o.Items) > 0 { - err := hcl.DecodeObject(&provider, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading provider for %s[%s]: %s", - t, - k, - err) - } - } - - result = append(result, &Resource{ - Mode: DataResourceMode, - Name: k, - Type: t, - RawCount: countConfig, - RawConfig: rawConfig, - Provider: provider, - Provisioners: []*Provisioner{}, - DependsOn: dependsOn, - Lifecycle: ResourceLifecycle{}, - }) - } - - return result, nil -} - -// Given a handle to a HCL object, this recurses into the structure -// and pulls out a list of managed resources. -// -// The resulting resources may not be unique, but each resource -// represents exactly one "resource" block in the HCL configuration. -// We leave it up to another pass to merge them together. -func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { - list = list.Children() - if len(list.Items) == 0 { - return nil, nil - } - - // Where all the results will go - var result []*Resource - - // Now go over all the types and their children in order to get - // all of the actual resources. - for _, item := range list.Items { - // GH-4385: We detect a pure provisioner resource and give the user - // an error about how to do it cleanly. - if len(item.Keys) == 4 && item.Keys[2].Token.Value().(string) == "provisioner" { - return nil, fmt.Errorf( - "position %s: provisioners in a resource should be wrapped in a list\n\n"+ - "Example: \"provisioner\": [ { \"local-exec\": ... 
} ]", - item.Pos()) - } - - // Fix up JSON input - unwrapHCLObjectKeysFromJSON(item, 2) - - if len(item.Keys) != 2 { - return nil, fmt.Errorf( - "position %s: resource must be followed by exactly two strings, a type and a name", - item.Pos()) - } - - t := item.Keys[0].Token.Value().(string) - k := item.Keys[1].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("resources %s[%s]: should be an object", t, k) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading config for %s[%s]: %s", - t, - k, - err) - } - - // Remove the fields we handle specially - delete(config, "connection") - delete(config, "count") - delete(config, "depends_on") - delete(config, "provisioner") - delete(config, "provider") - delete(config, "lifecycle") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading config for %s[%s]: %s", - t, - k, - err) - } - - // If we have a count, then figure it out - var count string = "1" - if o := listVal.Filter("count"); len(o.Items) > 0 { - err = hcl.DecodeObject(&count, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error parsing count for %s[%s]: %s", - t, - k, - err) - } - } - countConfig, err := NewRawConfig(map[string]interface{}{ - "count": count, - }) - if err != nil { - return nil, err - } - countConfig.Key = "count" - - // If we have depends fields, then add those in - var dependsOn []string - if o := listVal.Filter("depends_on"); len(o.Items) > 0 { - err := hcl.DecodeObject(&dependsOn, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading depends_on for %s[%s]: %s", - t, - k, - err) - } - } - - // If we have connection info, then parse those out - var connInfo map[string]interface{} - if o := listVal.Filter("connection"); len(o.Items) > 0 { - err := hcl.DecodeObject(&connInfo, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading connection info for %s[%s]: %s", - t, - k, - err) - } - } - - // If we have provisioners, then parse those out - var provisioners []*Provisioner - if os := listVal.Filter("provisioner"); len(os.Items) > 0 { - var err error - provisioners, err = loadProvisionersHcl(os, connInfo) - if err != nil { - return nil, fmt.Errorf( - "Error reading provisioners for %s[%s]: %s", - t, - k, - err) - } - } - - // If we have a provider, then parse it out - var provider string - if o := listVal.Filter("provider"); len(o.Items) > 0 { - err := hcl.DecodeObject(&provider, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading provider for %s[%s]: %s", - t, - k, - err) - } - } - - // Check if the resource should be re-created before - // destroying the existing instance - var lifecycle ResourceLifecycle - if o := listVal.Filter("lifecycle"); len(o.Items) > 0 { - if len(o.Items) > 1 { - return nil, fmt.Errorf( - "%s[%s]: Multiple lifecycle blocks found, expected one", - t, k) - } - - // Check for invalid keys - valid := []string{"create_before_destroy", "ignore_changes", "prevent_destroy"} - if err := checkHCLKeys(o.Items[0].Val, valid); err != nil { - return nil, multierror.Prefix(err, fmt.Sprintf( - "%s[%s]:", t, k)) - } - - var raw map[string]interface{} - if err = hcl.DecodeObject(&raw, o.Items[0].Val); err != nil { - return nil, fmt.Errorf( - "Error parsing lifecycle for %s[%s]: %s", - t, - k, - err) - } - - if err := 
mapstructure.WeakDecode(raw, &lifecycle); err != nil { - return nil, fmt.Errorf( - "Error parsing lifecycle for %s[%s]: %s", - t, - k, - err) - } - } - - result = append(result, &Resource{ - Mode: ManagedResourceMode, - Name: k, - Type: t, - RawCount: countConfig, - RawConfig: rawConfig, - Provisioners: provisioners, - Provider: provider, - DependsOn: dependsOn, - Lifecycle: lifecycle, - }) - } - - return result, nil -} - -func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) { - if err := assertAllBlocksHaveNames("provisioner", list); err != nil { - return nil, err - } - - list = list.Children() - if len(list.Items) == 0 { - return nil, nil - } - - // Go through each object and turn it into an actual result. - result := make([]*Provisioner, 0, len(list.Items)) - for _, item := range list.Items { - n := item.Keys[0].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("provisioner '%s': should be an object", n) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, err - } - - // Parse the "when" value - when := ProvisionerWhenCreate - if v, ok := config["when"]; ok { - switch v { - case "create": - when = ProvisionerWhenCreate - case "destroy": - when = ProvisionerWhenDestroy - default: - return nil, fmt.Errorf( - "position %s: 'provisioner' when must be 'create' or 'destroy'", - item.Pos()) - } - } - - // Parse the "on_failure" value - onFailure := ProvisionerOnFailureFail - if v, ok := config["on_failure"]; ok { - switch v { - case "continue": - onFailure = ProvisionerOnFailureContinue - case "fail": - onFailure = ProvisionerOnFailureFail - default: - return nil, fmt.Errorf( - "position %s: 'provisioner' on_failure must be 'continue' or 'fail'", - item.Pos()) - } - } - - // Delete fields we special case - delete(config, "connection") - delete(config, "when") - delete(config, "on_failure") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, err - } - - // Check if we have a provisioner-level connection - // block that overrides the resource-level - var subConnInfo map[string]interface{} - if o := listVal.Filter("connection"); len(o.Items) > 0 { - err := hcl.DecodeObject(&subConnInfo, o.Items[0].Val) - if err != nil { - return nil, err - } - } - - // Inherit from the resource connInfo any keys - // that are not explicitly overriden. - if connInfo != nil && subConnInfo != nil { - for k, v := range connInfo { - if _, ok := subConnInfo[k]; !ok { - subConnInfo[k] = v - } - } - } else if subConnInfo == nil { - subConnInfo = connInfo - } - - // Parse the connInfo - connRaw, err := NewRawConfig(subConnInfo) - if err != nil { - return nil, err - } - - result = append(result, &Provisioner{ - Type: n, - RawConfig: rawConfig, - ConnInfo: connRaw, - When: when, - OnFailure: onFailure, - }) - } - - return result, nil -} - -/* -func hclObjectMap(os *hclobj.Object) map[string]ast.ListNode { - objects := make(map[string][]*hclobj.Object) - - for _, o := range os.Elem(false) { - for _, elem := range o.Elem(true) { - val, ok := objects[elem.Key] - if !ok { - val = make([]*hclobj.Object, 0, 1) - } - - val = append(val, elem) - objects[elem.Key] = val - } - } - - return objects -} -*/ - -// assertAllBlocksHaveNames returns an error if any of the items in -// the given object list are blocks without keys (like "module {}") -// or simple assignments (like "module = 1"). 
It returns nil if -// neither of these things are true. -// -// The given name is used in any generated error messages, and should -// be the name of the block we're dealing with. The given list should -// be the result of calling .Filter on an object list with that same -// name. -func assertAllBlocksHaveNames(name string, list *ast.ObjectList) error { - if elem := list.Elem(); len(elem.Items) != 0 { - switch et := elem.Items[0].Val.(type) { - case *ast.ObjectType: - pos := et.Lbrace - return fmt.Errorf("%s: %q must be followed by a name", pos, name) - default: - pos := elem.Items[0].Val.Pos() - return fmt.Errorf("%s: %q must be a configuration block", pos, name) - } - } - return nil -} - -func checkHCLKeys(node ast.Node, valid []string) error { - var list *ast.ObjectList - switch n := node.(type) { - case *ast.ObjectList: - list = n - case *ast.ObjectType: - list = n.List - default: - return fmt.Errorf("cannot check HCL keys of type %T", n) - } - - validMap := make(map[string]struct{}, len(valid)) - for _, v := range valid { - validMap[v] = struct{}{} - } - - var result error - for _, item := range list.Items { - key := item.Keys[0].Token.Value().(string) - if _, ok := validMap[key]; !ok { - result = multierror.Append(result, fmt.Errorf( - "invalid key: %s", key)) - } - } - - return result -} - -// unwrapHCLObjectKeysFromJSON cleans up an edge case that can occur when -// parsing JSON as input: if we're parsing JSON then directly nested -// items will show up as additional "keys". -// -// For objects that expect a fixed number of keys, this breaks the -// decoding process. This function unwraps the object into what it would've -// looked like if it came directly from HCL by specifying the number of keys -// you expect. -// -// Example: -// -// { "foo": { "baz": {} } } -// -// Will show up with Keys being: []string{"foo", "baz"} -// when we really just want the first two. This function will fix this. -func unwrapHCLObjectKeysFromJSON(item *ast.ObjectItem, depth int) { - if len(item.Keys) > depth && item.Keys[0].Token.JSON { - for len(item.Keys) > depth { - // Pop off the last key - n := len(item.Keys) - key := item.Keys[n-1] - item.Keys[n-1] = nil - item.Keys = item.Keys[:n-1] - - // Wrap our value in a list - item.Val = &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{ - &ast.ObjectItem{ - Keys: []*ast.ObjectKey{key}, - Val: item.Val, - }, - }, - }, - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go deleted file mode 100644 index da7559a9..00000000 --- a/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go +++ /dev/null @@ -1,473 +0,0 @@ -package config - -import ( - "fmt" - "sort" - "strings" - - hcl2 "github.com/hashicorp/hcl/v2" - gohcl2 "github.com/hashicorp/hcl/v2/gohcl" - hcl2parse "github.com/hashicorp/hcl/v2/hclparse" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/zclconf/go-cty/cty" -) - -// hcl2Configurable is an implementation of configurable that knows -// how to turn a HCL Body into a *Config object. -type hcl2Configurable struct { - SourceFilename string - Body hcl2.Body -} - -// hcl2Loader is a wrapper around a HCL parser that provides a fileLoaderFunc. -type hcl2Loader struct { - Parser *hcl2parse.Parser -} - -// For the moment we'll just have a global loader since we don't have anywhere -// better to stash this. -// TODO: refactor the loader API so that it uses some sort of object we can -// stash the parser inside. 
-var globalHCL2Loader = newHCL2Loader() - -// newHCL2Loader creates a new hcl2Loader containing a new HCL Parser. -// -// HCL parsers retain information about files that are loaded to aid in -// producing diagnostic messages, so all files within a single configuration -// should be loaded with the same parser to ensure the availability of -// full diagnostic information. -func newHCL2Loader() hcl2Loader { - return hcl2Loader{ - Parser: hcl2parse.NewParser(), - } -} - -// loadFile is a fileLoaderFunc that knows how to read a HCL2 file and turn it -// into a hcl2Configurable. -func (l hcl2Loader) loadFile(filename string) (configurable, []string, error) { - var f *hcl2.File - var diags hcl2.Diagnostics - if strings.HasSuffix(filename, ".json") { - f, diags = l.Parser.ParseJSONFile(filename) - } else { - f, diags = l.Parser.ParseHCLFile(filename) - } - if diags.HasErrors() { - // Return diagnostics as an error; callers may type-assert this to - // recover the original diagnostics, if it doesn't end up wrapped - // in another error. - return nil, nil, diags - } - - return &hcl2Configurable{ - SourceFilename: filename, - Body: f.Body, - }, nil, nil -} - -func (t *hcl2Configurable) Config() (*Config, error) { - config := &Config{} - - // these structs are used only for the initial shallow decoding; we'll - // expand this into the main, public-facing config structs afterwards. - type atlas struct { - Name string `hcl:"name"` - Include *[]string `hcl:"include"` - Exclude *[]string `hcl:"exclude"` - } - type provider struct { - Name string `hcl:"name,label"` - Alias *string `hcl:"alias,attr"` - Version *string `hcl:"version,attr"` - Config hcl2.Body `hcl:",remain"` - } - type module struct { - Name string `hcl:"name,label"` - Source string `hcl:"source,attr"` - Version *string `hcl:"version,attr"` - Providers *map[string]string `hcl:"providers,attr"` - Config hcl2.Body `hcl:",remain"` - } - type resourceLifecycle struct { - CreateBeforeDestroy *bool `hcl:"create_before_destroy,attr"` - PreventDestroy *bool `hcl:"prevent_destroy,attr"` - IgnoreChanges *[]string `hcl:"ignore_changes,attr"` - } - type connection struct { - Config hcl2.Body `hcl:",remain"` - } - type provisioner struct { - Type string `hcl:"type,label"` - - When *string `hcl:"when,attr"` - OnFailure *string `hcl:"on_failure,attr"` - - Connection *connection `hcl:"connection,block"` - Config hcl2.Body `hcl:",remain"` - } - type managedResource struct { - Type string `hcl:"type,label"` - Name string `hcl:"name,label"` - - CountExpr hcl2.Expression `hcl:"count,attr"` - Provider *string `hcl:"provider,attr"` - DependsOn *[]string `hcl:"depends_on,attr"` - - Lifecycle *resourceLifecycle `hcl:"lifecycle,block"` - Provisioners []provisioner `hcl:"provisioner,block"` - Connection *connection `hcl:"connection,block"` - - Config hcl2.Body `hcl:",remain"` - } - type dataResource struct { - Type string `hcl:"type,label"` - Name string `hcl:"name,label"` - - CountExpr hcl2.Expression `hcl:"count,attr"` - Provider *string `hcl:"provider,attr"` - DependsOn *[]string `hcl:"depends_on,attr"` - - Config hcl2.Body `hcl:",remain"` - } - type variable struct { - Name string `hcl:"name,label"` - - DeclaredType *string `hcl:"type,attr"` - Default *cty.Value `hcl:"default,attr"` - Description *string `hcl:"description,attr"` - Sensitive *bool `hcl:"sensitive,attr"` - } - type output struct { - Name string `hcl:"name,label"` - - ValueExpr hcl2.Expression `hcl:"value,attr"` - DependsOn *[]string `hcl:"depends_on,attr"` - Description *string 
`hcl:"description,attr"` - Sensitive *bool `hcl:"sensitive,attr"` - } - type locals struct { - Definitions hcl2.Attributes `hcl:",remain"` - } - type backend struct { - Type string `hcl:"type,label"` - Config hcl2.Body `hcl:",remain"` - } - type terraform struct { - RequiredVersion *string `hcl:"required_version,attr"` - Backend *backend `hcl:"backend,block"` - } - type topLevel struct { - Atlas *atlas `hcl:"atlas,block"` - Datas []dataResource `hcl:"data,block"` - Modules []module `hcl:"module,block"` - Outputs []output `hcl:"output,block"` - Providers []provider `hcl:"provider,block"` - Resources []managedResource `hcl:"resource,block"` - Terraform *terraform `hcl:"terraform,block"` - Variables []variable `hcl:"variable,block"` - Locals []*locals `hcl:"locals,block"` - } - - var raw topLevel - diags := gohcl2.DecodeBody(t.Body, nil, &raw) - if diags.HasErrors() { - // Do some minimal decoding to see if we can at least get the - // required Terraform version, which might help explain why we - // couldn't parse the rest. - if raw.Terraform != nil && raw.Terraform.RequiredVersion != nil { - config.Terraform = &Terraform{ - RequiredVersion: *raw.Terraform.RequiredVersion, - } - } - - // We return the diags as an implementation of error, which the - // caller than then type-assert if desired to recover the individual - // diagnostics. - // FIXME: The current API gives us no way to return warnings in the - // absence of any errors. - return config, diags - } - - if raw.Terraform != nil { - var reqdVersion string - var backend *Backend - - if raw.Terraform.RequiredVersion != nil { - reqdVersion = *raw.Terraform.RequiredVersion - } - if raw.Terraform.Backend != nil { - backend = new(Backend) - backend.Type = raw.Terraform.Backend.Type - - // We don't permit interpolations or nested blocks inside the - // backend config, so we can decode the config early here and - // get direct access to the values, which is important for the - // config hashing to work as expected. - var config map[string]string - configDiags := gohcl2.DecodeBody(raw.Terraform.Backend.Config, nil, &config) - diags = append(diags, configDiags...) 
- - raw := make(map[string]interface{}, len(config)) - for k, v := range config { - raw[k] = v - } - - var err error - backend.RawConfig, err = NewRawConfig(raw) - if err != nil { - diags = append(diags, &hcl2.Diagnostic{ - Severity: hcl2.DiagError, - Summary: "Invalid backend configuration", - Detail: fmt.Sprintf("Error in backend configuration: %s", err), - }) - } - } - - config.Terraform = &Terraform{ - RequiredVersion: reqdVersion, - Backend: backend, - } - } - - if raw.Atlas != nil { - var include, exclude []string - if raw.Atlas.Include != nil { - include = *raw.Atlas.Include - } - if raw.Atlas.Exclude != nil { - exclude = *raw.Atlas.Exclude - } - config.Atlas = &AtlasConfig{ - Name: raw.Atlas.Name, - Include: include, - Exclude: exclude, - } - } - - for _, rawM := range raw.Modules { - m := &Module{ - Name: rawM.Name, - Source: rawM.Source, - RawConfig: NewRawConfigHCL2(rawM.Config), - } - - if rawM.Version != nil { - m.Version = *rawM.Version - } - - if rawM.Providers != nil { - m.Providers = *rawM.Providers - } - - config.Modules = append(config.Modules, m) - } - - for _, rawV := range raw.Variables { - v := &Variable{ - Name: rawV.Name, - } - if rawV.DeclaredType != nil { - v.DeclaredType = *rawV.DeclaredType - } - if rawV.Default != nil { - v.Default = hcl2shim.ConfigValueFromHCL2(*rawV.Default) - } - if rawV.Description != nil { - v.Description = *rawV.Description - } - - config.Variables = append(config.Variables, v) - } - - for _, rawO := range raw.Outputs { - o := &Output{ - Name: rawO.Name, - } - - if rawO.Description != nil { - o.Description = *rawO.Description - } - if rawO.DependsOn != nil { - o.DependsOn = *rawO.DependsOn - } - if rawO.Sensitive != nil { - o.Sensitive = *rawO.Sensitive - } - - // The result is expected to be a map like map[string]interface{}{"value": something}, - // so we'll fake that with our hcl2shim.SingleAttrBody shim. 
- o.RawConfig = NewRawConfigHCL2(hcl2shim.SingleAttrBody{ - Name: "value", - Expr: rawO.ValueExpr, - }) - - config.Outputs = append(config.Outputs, o) - } - - for _, rawR := range raw.Resources { - r := &Resource{ - Mode: ManagedResourceMode, - Type: rawR.Type, - Name: rawR.Name, - } - if rawR.Lifecycle != nil { - var l ResourceLifecycle - if rawR.Lifecycle.CreateBeforeDestroy != nil { - l.CreateBeforeDestroy = *rawR.Lifecycle.CreateBeforeDestroy - } - if rawR.Lifecycle.PreventDestroy != nil { - l.PreventDestroy = *rawR.Lifecycle.PreventDestroy - } - if rawR.Lifecycle.IgnoreChanges != nil { - l.IgnoreChanges = *rawR.Lifecycle.IgnoreChanges - } - r.Lifecycle = l - } - if rawR.Provider != nil { - r.Provider = *rawR.Provider - } - if rawR.DependsOn != nil { - r.DependsOn = *rawR.DependsOn - } - - var defaultConnInfo *RawConfig - if rawR.Connection != nil { - defaultConnInfo = NewRawConfigHCL2(rawR.Connection.Config) - } - - for _, rawP := range rawR.Provisioners { - p := &Provisioner{ - Type: rawP.Type, - } - - switch { - case rawP.When == nil: - p.When = ProvisionerWhenCreate - case *rawP.When == "create": - p.When = ProvisionerWhenCreate - case *rawP.When == "destroy": - p.When = ProvisionerWhenDestroy - default: - p.When = ProvisionerWhenInvalid - } - - switch { - case rawP.OnFailure == nil: - p.OnFailure = ProvisionerOnFailureFail - case *rawP.When == "fail": - p.OnFailure = ProvisionerOnFailureFail - case *rawP.When == "continue": - p.OnFailure = ProvisionerOnFailureContinue - default: - p.OnFailure = ProvisionerOnFailureInvalid - } - - if rawP.Connection != nil { - p.ConnInfo = NewRawConfigHCL2(rawP.Connection.Config) - } else { - p.ConnInfo = defaultConnInfo - } - - p.RawConfig = NewRawConfigHCL2(rawP.Config) - - r.Provisioners = append(r.Provisioners, p) - } - - // The old loader records the count expression as a weird RawConfig with - // a single-element map inside. Since the rest of the world is assuming - // that, we'll mimic it here. - { - countBody := hcl2shim.SingleAttrBody{ - Name: "count", - Expr: rawR.CountExpr, - } - - r.RawCount = NewRawConfigHCL2(countBody) - r.RawCount.Key = "count" - } - - r.RawConfig = NewRawConfigHCL2(rawR.Config) - - config.Resources = append(config.Resources, r) - - } - - for _, rawR := range raw.Datas { - r := &Resource{ - Mode: DataResourceMode, - Type: rawR.Type, - Name: rawR.Name, - } - - if rawR.Provider != nil { - r.Provider = *rawR.Provider - } - if rawR.DependsOn != nil { - r.DependsOn = *rawR.DependsOn - } - - // The old loader records the count expression as a weird RawConfig with - // a single-element map inside. Since the rest of the world is assuming - // that, we'll mimic it here. - { - countBody := hcl2shim.SingleAttrBody{ - Name: "count", - Expr: rawR.CountExpr, - } - - r.RawCount = NewRawConfigHCL2(countBody) - r.RawCount.Key = "count" - } - - r.RawConfig = NewRawConfigHCL2(rawR.Config) - - config.Resources = append(config.Resources, r) - } - - for _, rawP := range raw.Providers { - p := &ProviderConfig{ - Name: rawP.Name, - } - - if rawP.Alias != nil { - p.Alias = *rawP.Alias - } - if rawP.Version != nil { - p.Version = *rawP.Version - } - - // The result is expected to be a map like map[string]interface{}{"value": something}, - // so we'll fake that with our hcl2shim.SingleAttrBody shim. 
- p.RawConfig = NewRawConfigHCL2(rawP.Config) - - config.ProviderConfigs = append(config.ProviderConfigs, p) - } - - for _, rawL := range raw.Locals { - names := make([]string, 0, len(rawL.Definitions)) - for n := range rawL.Definitions { - names = append(names, n) - } - sort.Strings(names) - for _, n := range names { - attr := rawL.Definitions[n] - l := &Local{ - Name: n, - RawConfig: NewRawConfigHCL2(hcl2shim.SingleAttrBody{ - Name: "value", - Expr: attr.Expr, - }), - } - config.Locals = append(config.Locals, l) - } - } - - // FIXME: The current API gives us no way to return warnings in the - // absence of any errors. - var err error - if diags.HasErrors() { - err = diags - } - - return config, err -} diff --git a/vendor/github.com/hashicorp/terraform/config/merge.go b/vendor/github.com/hashicorp/terraform/config/merge.go deleted file mode 100644 index 55fc864f..00000000 --- a/vendor/github.com/hashicorp/terraform/config/merge.go +++ /dev/null @@ -1,204 +0,0 @@ -package config - -// Merge merges two configurations into a single configuration. -// -// Merge allows for the two configurations to have duplicate resources, -// because the resources will be merged. This differs from a single -// Config which must only have unique resources. -func Merge(c1, c2 *Config) (*Config, error) { - c := new(Config) - - // Merge unknown keys - unknowns := make(map[string]struct{}) - for _, k := range c1.unknownKeys { - _, present := unknowns[k] - if !present { - unknowns[k] = struct{}{} - c.unknownKeys = append(c.unknownKeys, k) - } - } - for _, k := range c2.unknownKeys { - _, present := unknowns[k] - if !present { - unknowns[k] = struct{}{} - c.unknownKeys = append(c.unknownKeys, k) - } - } - - // Merge Atlas configuration. This is a dumb one overrides the other - // sort of merge. - c.Atlas = c1.Atlas - if c2.Atlas != nil { - c.Atlas = c2.Atlas - } - - // Merge the Terraform configuration - if c1.Terraform != nil { - c.Terraform = c1.Terraform - if c2.Terraform != nil { - c.Terraform.Merge(c2.Terraform) - } - } else { - c.Terraform = c2.Terraform - } - - // NOTE: Everything below is pretty gross. Due to the lack of generics - // in Go, there is some hoop-jumping involved to make this merging a - // little more test-friendly and less repetitive. Ironically, making it - // less repetitive involves being a little repetitive, but I prefer to - // be repetitive with things that are less error prone than things that - // are more error prone (more logic). Type conversions to an interface - // are pretty low-error. 
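// The merger interface that makes this work is defined at the bottom of
// this file. Each config element implements it roughly along these lines
// (a sketch, not the exact method bodies):
//
//	func (m *Module) mergerName() string { return m.Name }
//
//	func (m *Module) mergerMerge(other merger) merger {
//		m2 := other.(*Module)
//		result := *m
//		result.RawConfig = result.RawConfig.merge(m2.RawConfig)
//		return &result
//	}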
- - var m1, m2, mresult []merger - - // Modules - m1 = make([]merger, 0, len(c1.Modules)) - m2 = make([]merger, 0, len(c2.Modules)) - for _, v := range c1.Modules { - m1 = append(m1, v) - } - for _, v := range c2.Modules { - m2 = append(m2, v) - } - mresult = mergeSlice(m1, m2) - if len(mresult) > 0 { - c.Modules = make([]*Module, len(mresult)) - for i, v := range mresult { - c.Modules[i] = v.(*Module) - } - } - - // Outputs - m1 = make([]merger, 0, len(c1.Outputs)) - m2 = make([]merger, 0, len(c2.Outputs)) - for _, v := range c1.Outputs { - m1 = append(m1, v) - } - for _, v := range c2.Outputs { - m2 = append(m2, v) - } - mresult = mergeSlice(m1, m2) - if len(mresult) > 0 { - c.Outputs = make([]*Output, len(mresult)) - for i, v := range mresult { - c.Outputs[i] = v.(*Output) - } - } - - // Provider Configs - m1 = make([]merger, 0, len(c1.ProviderConfigs)) - m2 = make([]merger, 0, len(c2.ProviderConfigs)) - for _, v := range c1.ProviderConfigs { - m1 = append(m1, v) - } - for _, v := range c2.ProviderConfigs { - m2 = append(m2, v) - } - mresult = mergeSlice(m1, m2) - if len(mresult) > 0 { - c.ProviderConfigs = make([]*ProviderConfig, len(mresult)) - for i, v := range mresult { - c.ProviderConfigs[i] = v.(*ProviderConfig) - } - } - - // Resources - m1 = make([]merger, 0, len(c1.Resources)) - m2 = make([]merger, 0, len(c2.Resources)) - for _, v := range c1.Resources { - m1 = append(m1, v) - } - for _, v := range c2.Resources { - m2 = append(m2, v) - } - mresult = mergeSlice(m1, m2) - if len(mresult) > 0 { - c.Resources = make([]*Resource, len(mresult)) - for i, v := range mresult { - c.Resources[i] = v.(*Resource) - } - } - - // Variables - m1 = make([]merger, 0, len(c1.Variables)) - m2 = make([]merger, 0, len(c2.Variables)) - for _, v := range c1.Variables { - m1 = append(m1, v) - } - for _, v := range c2.Variables { - m2 = append(m2, v) - } - mresult = mergeSlice(m1, m2) - if len(mresult) > 0 { - c.Variables = make([]*Variable, len(mresult)) - for i, v := range mresult { - c.Variables[i] = v.(*Variable) - } - } - - // Local Values - // These are simpler than the other config elements because they are just - // flat values and so no deep merging is required. - if localsCount := len(c1.Locals) + len(c2.Locals); localsCount != 0 { - // Explicit length check above because we want c.Locals to remain - // nil if the result would be empty. - c.Locals = make([]*Local, 0, len(c1.Locals)+len(c2.Locals)) - c.Locals = append(c.Locals, c1.Locals...) - c.Locals = append(c.Locals, c2.Locals...) - } - - return c, nil -} - -// merger is an interface that must be implemented by types that are -// merge-able. This simplifies the implementation of Merge for the various -// components of a Config. -type merger interface { - mergerName() string - mergerMerge(merger) merger -} - -// mergeSlice merges a slice of mergers. -func mergeSlice(m1, m2 []merger) []merger { - r := make([]merger, len(m1), len(m1)+len(m2)) - copy(r, m1) - - m := map[string]struct{}{} - for _, v2 := range m2 { - // If we already saw it, just append it because its a - // duplicate and invalid... 
- name := v2.mergerName() - if _, ok := m[name]; ok { - r = append(r, v2) - continue - } - m[name] = struct{}{} - - // Find an original to override - var original merger - originalIndex := -1 - for i, v := range m1 { - if v.mergerName() == name { - originalIndex = i - original = v - break - } - } - - var v merger - if original == nil { - v = v2 - } else { - v = original.mergerMerge(v2) - } - - if originalIndex == -1 { - r = append(r, v) - } else { - r[originalIndex] = v - } - } - - return r -} diff --git a/vendor/github.com/hashicorp/terraform/config/providers.go b/vendor/github.com/hashicorp/terraform/config/providers.go deleted file mode 100644 index eeddabc3..00000000 --- a/vendor/github.com/hashicorp/terraform/config/providers.go +++ /dev/null @@ -1,61 +0,0 @@ -package config - -import "github.com/blang/semver" - -// ProviderVersionConstraint presents a constraint for a particular -// provider, identified by its full name. -type ProviderVersionConstraint struct { - Constraint string - ProviderType string -} - -// ProviderVersionConstraints is a map from provider full name to its associated -// ProviderVersionConstraint, as produced by Config.RequiredProviders. -type ProviderVersionConstraints map[string]ProviderVersionConstraint - -// RequiredRanges returns a semver.Range for each distinct provider type in -// the constraint map. If the same provider type appears more than once -// (e.g. because aliases are in use) then their respective constraints are -// combined such that they must *all* apply. -// -// The result of this method can be passed to the -// PluginMetaSet.ConstrainVersions method within the plugin/discovery -// package in order to filter down the available plugins to those which -// satisfy the given constraints. -// -// This function will panic if any of the constraints within cannot be -// parsed as semver ranges. This is guaranteed to never happen for a -// constraint set that was built from a configuration that passed validation. -func (cons ProviderVersionConstraints) RequiredRanges() map[string]semver.Range { - ret := make(map[string]semver.Range, len(cons)) - - for _, con := range cons { - spec := semver.MustParseRange(con.Constraint) - if existing, exists := ret[con.ProviderType]; exists { - ret[con.ProviderType] = existing.AND(spec) - } else { - ret[con.ProviderType] = spec - } - } - - return ret -} - -// ProviderConfigsByFullName returns a map from provider full names (as -// returned by ProviderConfig.FullName()) to the corresponding provider -// configs. -// -// This function returns no new information than what's already in -// c.ProviderConfigs, but returns it in a more convenient shape. If there -// is more than one provider config with the same full name then the result -// is undefined, but that is guaranteed not to happen for any config that -// has passed validation. -func (c *Config) ProviderConfigsByFullName() map[string]*ProviderConfig { - ret := make(map[string]*ProviderConfig, len(c.ProviderConfigs)) - - for _, pc := range c.ProviderConfigs { - ret[pc.FullName()] = pc - } - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go deleted file mode 100644 index 00fd43fc..00000000 --- a/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go +++ /dev/null @@ -1,40 +0,0 @@ -package config - -// ProvisionerWhen is an enum for valid values for when to run provisioners. 
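// On RequiredRanges in providers.go above: a github.com/blang/semver
// Range is a predicate over versions, so combining constraints is a
// logical AND. A sketch:
//
//	r := semver.MustParseRange(">=1.0.0").AND(semver.MustParseRange("<2.0.0"))
//	r(semver.MustParse("1.5.0")) // true
//	r(semver.MustParse("2.1.0")) // false
//
// This is why RequiredRanges can fold aliased provider constraints into a
// single Range per provider type.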
-type ProvisionerWhen int - -const ( - ProvisionerWhenInvalid ProvisionerWhen = iota - ProvisionerWhenCreate - ProvisionerWhenDestroy -) - -var provisionerWhenStrs = map[ProvisionerWhen]string{ - ProvisionerWhenInvalid: "invalid", - ProvisionerWhenCreate: "create", - ProvisionerWhenDestroy: "destroy", -} - -func (v ProvisionerWhen) String() string { - return provisionerWhenStrs[v] -} - -// ProvisionerOnFailure is an enum for valid values for on_failure options -// for provisioners. -type ProvisionerOnFailure int - -const ( - ProvisionerOnFailureInvalid ProvisionerOnFailure = iota - ProvisionerOnFailureContinue - ProvisionerOnFailureFail -) - -var provisionerOnFailureStrs = map[ProvisionerOnFailure]string{ - ProvisionerOnFailureInvalid: "invalid", - ProvisionerOnFailureContinue: "continue", - ProvisionerOnFailureFail: "fail", -} - -func (v ProvisionerOnFailure) String() string { - return provisionerOnFailureStrs[v] -} diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go deleted file mode 100644 index 32d38114..00000000 --- a/vendor/github.com/hashicorp/terraform/config/raw_config.go +++ /dev/null @@ -1,406 +0,0 @@ -package config - -import ( - "bytes" - "encoding/gob" - "errors" - "strconv" - "sync" - - hcl2 "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hil" - "github.com/hashicorp/hil/ast" - "github.com/mitchellh/copystructure" - "github.com/mitchellh/reflectwalk" -) - -// RawConfig is a structure that holds a piece of configuration -// where the overall structure is unknown since it will be used -// to configure a plugin or some other similar external component. -// -// RawConfigs can be interpolated with variables that come from -// other resources, user variables, etc. -// -// RawConfig supports a query-like interface to request -// information from deep within the structure. -type RawConfig struct { - Key string - - // Only _one_ of Raw and Body may be populated at a time. - // - // In the normal case, Raw is populated and Body is nil. - // - // When the experimental HCL2 parsing mode is enabled, "Body" - // is populated and RawConfig serves only to transport the hcl2.Body - // through the rest of Terraform core so we can ultimately decode it - // once its schema is known. - // - // Once we transition to HCL2 as the primary representation, RawConfig - // should be removed altogether and the hcl2.Body should be passed - // around directly. - - Raw map[string]interface{} - Body hcl2.Body - - Interpolations []ast.Node - Variables map[string]InterpolatedVariable - - lock sync.Mutex - config map[string]interface{} - unknownKeys []string -} - -// NewRawConfig creates a new RawConfig structure and populates the -// publicly readable struct fields. -func NewRawConfig(raw map[string]interface{}) (*RawConfig, error) { - result := &RawConfig{Raw: raw} - if err := result.init(); err != nil { - return nil, err - } - - return result, nil -} - -// NewRawConfigHCL2 creates a new RawConfig that is serving as a capsule -// to transport a hcl2.Body. In this mode, the publicly-readable struct -// fields are not populated since all operations should instead be diverted -// to the HCL2 body. -// -// For a RawConfig object constructed with this function, the only valid use -// is to later retrieve the Body value and call its own methods. Callers -// may choose to set and then later handle the Key field, in a manner -// consistent with how it is handled by the Value method, but the Value -// method itself must not be used. 
-// -// This is an experimental codepath to be used only by the HCL2 config loader. -// Non-experimental parsing should _always_ use NewRawConfig to produce a -// fully-functional RawConfig object. -func NewRawConfigHCL2(body hcl2.Body) *RawConfig { - return &RawConfig{ - Body: body, - } -} - -// RawMap returns a copy of the RawConfig.Raw map. -func (r *RawConfig) RawMap() map[string]interface{} { - r.lock.Lock() - defer r.lock.Unlock() - - m := make(map[string]interface{}) - for k, v := range r.Raw { - m[k] = v - } - return m -} - -// Copy returns a copy of this RawConfig, uninterpolated. -func (r *RawConfig) Copy() *RawConfig { - if r == nil { - return nil - } - - r.lock.Lock() - defer r.lock.Unlock() - - if r.Body != nil { - return NewRawConfigHCL2(r.Body) - } - - newRaw := make(map[string]interface{}) - for k, v := range r.Raw { - newRaw[k] = v - } - - result, err := NewRawConfig(newRaw) - if err != nil { - panic("copy failed: " + err.Error()) - } - - result.Key = r.Key - return result -} - -// Value returns the value of the configuration if this configuration -// has a Key set. If this does not have a Key set, nil will be returned. -func (r *RawConfig) Value() interface{} { - if c := r.Config(); c != nil { - if v, ok := c[r.Key]; ok { - return v - } - } - - r.lock.Lock() - defer r.lock.Unlock() - return r.Raw[r.Key] -} - -// Config returns the entire configuration with the variables -// interpolated from any call to Interpolate. -// -// If any interpolated variables are unknown (value set to -// UnknownVariableValue), the first non-container (map, slice, etc.) element -// will be removed from the config. The keys of unknown variables -// can be found using the UnknownKeys function. -// -// By pruning out unknown keys from the configuration, the raw -// structure will always successfully decode into its ultimate -// structure using something like mapstructure. -func (r *RawConfig) Config() map[string]interface{} { - r.lock.Lock() - defer r.lock.Unlock() - return r.config -} - -// Interpolate uses the given mapping of variable values and uses -// those as the values to replace any variables in this raw -// configuration. -// -// Any prior calls to Interpolate are replaced with this one. -// -// If a variable key is missing, this will panic. -func (r *RawConfig) Interpolate(vs map[string]ast.Variable) error { - r.lock.Lock() - defer r.lock.Unlock() - - // Create the evaluation configuration we use to execute - config := &hil.EvalConfig{ - GlobalScope: &ast.BasicScope{ - VarMap: vs, - }, - } - return r.interpolate(func(root ast.Node) (interface{}, error) { - // None of the variables we need are computed, meaning we should - // be able to properly evaluate. - result, err := hil.Eval(root, config) - if err != nil { - return "", err - } - - return result.Value, nil - }) -} - -// Merge merges another RawConfig into this one (overriding any conflicting -// values in this config) and returns a new config. The original config -// is not modified. 
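A minimal usage sketch of the RawConfig lifecycle described above (not part of the vendored source; the hil variable map and its "var.name" key are illustrative assumptions):

	rc, err := NewRawConfig(map[string]interface{}{"name": "${var.name}"})
	if err != nil {
		// the raw expression failed to parse
	}
	vars := map[string]ast.Variable{
		"var.name": {Type: ast.TypeString, Value: "example"},
	}
	// Per the doc comment above, Interpolate panics if a referenced
	// variable key is missing from vars.
	if err := rc.Interpolate(vars); err != nil {
		// evaluation failed
	}
	fmt.Println(rc.Config()["name"]) // "example"
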
-func (r *RawConfig) Merge(other *RawConfig) *RawConfig {
-	r.lock.Lock()
-	defer r.lock.Unlock()
-
-	// Merge the raw configurations
-	raw := make(map[string]interface{})
-	for k, v := range r.Raw {
-		raw[k] = v
-	}
-	for k, v := range other.Raw {
-		raw[k] = v
-	}
-
-	// Create the result
-	result, err := NewRawConfig(raw)
-	if err != nil {
-		panic(err)
-	}
-
-	// Merge the interpolated results
-	result.config = make(map[string]interface{})
-	for k, v := range r.config {
-		result.config[k] = v
-	}
-	for k, v := range other.config {
-		result.config[k] = v
-	}
-
-	// Build the unknown keys
-	if len(r.unknownKeys) > 0 || len(other.unknownKeys) > 0 {
-		unknownKeys := make(map[string]struct{})
-		for _, k := range r.unknownKeys {
-			unknownKeys[k] = struct{}{}
-		}
-		for _, k := range other.unknownKeys {
-			unknownKeys[k] = struct{}{}
-		}
-
-		result.unknownKeys = make([]string, 0, len(unknownKeys))
-		for k := range unknownKeys {
-			result.unknownKeys = append(result.unknownKeys, k)
-		}
-	}
-
-	return result
-}
-
-func (r *RawConfig) init() error {
-	r.lock.Lock()
-	defer r.lock.Unlock()
-
-	r.config = r.Raw
-	r.Interpolations = nil
-	r.Variables = nil
-
-	fn := func(node ast.Node) (interface{}, error) {
-		r.Interpolations = append(r.Interpolations, node)
-		vars, err := DetectVariables(node)
-		if err != nil {
-			return "", err
-		}
-
-		for _, v := range vars {
-			if r.Variables == nil {
-				r.Variables = make(map[string]InterpolatedVariable)
-			}
-
-			r.Variables[v.FullKey()] = v
-		}
-
-		return "", nil
-	}
-
-	walker := &interpolationWalker{F: fn}
-	if err := reflectwalk.Walk(r.Raw, walker); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error {
-	if r.Body != nil {
-		// For RawConfigs created for the HCL2 experiment, callers must
-		// use the HCL2 Body API directly rather than interpolating via
-		// the RawConfig.
-		return errors.New("this feature is not yet supported under the HCL2 experiment")
-	}
-
-	config, err := copystructure.Copy(r.Raw)
-	if err != nil {
-		return err
-	}
-	r.config = config.(map[string]interface{})
-
-	w := &interpolationWalker{F: fn, Replace: true}
-	err = reflectwalk.Walk(r.config, w)
-	if err != nil {
-		return err
-	}
-
-	r.unknownKeys = w.unknownKeys
-	return nil
-}
-
-func (r *RawConfig) merge(r2 *RawConfig) *RawConfig {
-	if r == nil && r2 == nil {
-		return nil
-	}
-
-	if r == nil {
-		r = &RawConfig{}
-	}
-
-	rawRaw, err := copystructure.Copy(r.Raw)
-	if err != nil {
-		panic(err)
-	}
-
-	raw := rawRaw.(map[string]interface{})
-	if r2 != nil {
-		for k, v := range r2.Raw {
-			raw[k] = v
-		}
-	}
-
-	result, err := NewRawConfig(raw)
-	if err != nil {
-		panic(err)
-	}
-
-	return result
-}
-
-// couldBeInteger is a helper that determines if the represented value could
-// result in an integer.
-//
-// This function only works for RawConfigs that have "Key" set, meaning that
-// a single result can be produced. Calling this function will overwrite
-// the Config and Value results to be a test value.
-//
-// This function is conservative. If there is some doubt about whether the
-// result could be an integer -- for example, if it depends on a variable
-// whose type we don't know yet -- it will still return true.
-func (r *RawConfig) couldBeInteger() bool {
-	if r.Key == "" {
-		// un-keyed RawConfigs can never produce numbers
-		return false
-	}
-	if r.Body == nil {
-		// Normal path: using the interpolator in this package
-		// Interpolate with a fixed number to verify that it's a number. 
- r.interpolate(func(root ast.Node) (interface{}, error) { - // Execute the node but transform the AST so that it returns - // a fixed value of "5" for all interpolations. - result, err := hil.Eval( - hil.FixedValueTransform( - root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}), - nil) - if err != nil { - return "", err - } - - return result.Value, nil - }) - _, err := strconv.ParseInt(r.Value().(string), 0, 0) - return err == nil - } else { - // We briefly tried to gradually implement HCL2 support by adding a - // branch here, but that experiment was not successful. - panic("HCL2 experimental path no longer supported") - } -} - -// UnknownKeys returns the keys of the configuration that are unknown -// because they had interpolated variables that must be computed. -func (r *RawConfig) UnknownKeys() []string { - r.lock.Lock() - defer r.lock.Unlock() - return r.unknownKeys -} - -// See GobEncode -func (r *RawConfig) GobDecode(b []byte) error { - var data gobRawConfig - err := gob.NewDecoder(bytes.NewReader(b)).Decode(&data) - if err != nil { - return err - } - - r.Key = data.Key - r.Raw = data.Raw - - return r.init() -} - -// GobEncode is a custom Gob encoder to use so that we only include the -// raw configuration. Interpolated variables and such are lost and the -// tree of interpolated variables is recomputed on decode, since it is -// referentially transparent. -func (r *RawConfig) GobEncode() ([]byte, error) { - r.lock.Lock() - defer r.lock.Unlock() - - data := gobRawConfig{ - Key: r.Key, - Raw: r.Raw, - } - - var buf bytes.Buffer - if err := gob.NewEncoder(&buf).Encode(data); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type gobRawConfig struct { - Key string - Raw map[string]interface{} -} diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode.go b/vendor/github.com/hashicorp/terraform/config/resource_mode.go deleted file mode 100644 index dd915217..00000000 --- a/vendor/github.com/hashicorp/terraform/config/resource_mode.go +++ /dev/null @@ -1,9 +0,0 @@ -package config - -//go:generate go run golang.org/x/tools/cmd/stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go -type ResourceMode int - -const ( - ManagedResourceMode ResourceMode = iota - DataResourceMode -) diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go deleted file mode 100644 index 01052782..00000000 --- a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT. - -package config - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
-	var x [1]struct{}
-	_ = x[ManagedResourceMode-0]
-	_ = x[DataResourceMode-1]
-}
-
-const _ResourceMode_name = "ManagedResourceModeDataResourceMode"
-
-var _ResourceMode_index = [...]uint8{0, 19, 35}
-
-func (i ResourceMode) String() string {
-	if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) {
-		return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]]
-}
diff --git a/vendor/github.com/hashicorp/terraform/config/testing.go b/vendor/github.com/hashicorp/terraform/config/testing.go
deleted file mode 100644
index 831fc778..00000000
--- a/vendor/github.com/hashicorp/terraform/config/testing.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package config
-
-import (
-	"testing"
-)
-
-// TestRawConfig is used to create a RawConfig for testing.
-func TestRawConfig(t *testing.T, c map[string]interface{}) *RawConfig {
-	t.Helper()
-
-	cfg, err := NewRawConfig(c)
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
-
-	return cfg
-}
diff --git a/vendor/github.com/hashicorp/terraform/configs/backend.go b/vendor/github.com/hashicorp/terraform/configs/backend.go
deleted file mode 100644
index 5d8b9732..00000000
--- a/vendor/github.com/hashicorp/terraform/configs/backend.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package configs
-
-import (
-	"github.com/hashicorp/hcl/v2"
-	"github.com/hashicorp/hcl/v2/hcldec"
-	"github.com/hashicorp/terraform/configs/configschema"
-	"github.com/zclconf/go-cty/cty"
-)
-
-// Backend represents a "backend" block inside a "terraform" block in a module
-// or file.
-type Backend struct {
-	Type   string
-	Config hcl.Body
-
-	TypeRange hcl.Range
-	DeclRange hcl.Range
-}
-
-func decodeBackendBlock(block *hcl.Block) (*Backend, hcl.Diagnostics) {
-	return &Backend{
-		Type:      block.Labels[0],
-		TypeRange: block.LabelRanges[0],
-		Config:    block.Body,
-		DeclRange: block.DefRange,
-	}, nil
-}
-
-// Hash produces a hash value for the receiver that covers the type and the
-// portions of the config that conform to the given schema.
-//
-// If the config does not conform to the schema then the result is not
-// meaningful for comparison since it will be based on an incomplete result.
-//
-// As an exception, required attributes in the schema are treated as optional
-// for the purpose of hashing, so that an incomplete configuration can still
-// be hashed. Other errors, such as extraneous attributes, have no such special
-// case.
-func (b *Backend) Hash(schema *configschema.Block) int {
-	// Don't fail if required attributes are not set. Instead, we'll just
-	// hash them as nulls.
-	schema = schema.NoneRequired()
-	spec := schema.DecoderSpec()
-	val, _ := hcldec.Decode(b.Config, spec, nil)
-	if val == cty.NilVal {
-		val = cty.UnknownVal(schema.ImpliedType())
-	}
-
-	toHash := cty.TupleVal([]cty.Value{
-		cty.StringVal(b.Type),
-		val,
-	})
-
-	return toHash.Hash()
-}
diff --git a/vendor/github.com/hashicorp/terraform/configs/compat_shim.go b/vendor/github.com/hashicorp/terraform/configs/compat_shim.go
deleted file mode 100644
index b645ac89..00000000
--- a/vendor/github.com/hashicorp/terraform/configs/compat_shim.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package configs
-
-import (
-	"github.com/hashicorp/hcl/v2"
-	"github.com/hashicorp/hcl/v2/hclsyntax"
-	"github.com/zclconf/go-cty/cty"
-)
-
-// -------------------------------------------------------------------------
-// Functions in this file are compatibility shims intended to ease conversion
-// from the old configuration loader. 
Any use of these functions that makes -// a change should generate a deprecation warning explaining to the user how -// to update their code for new patterns. -// -// Shims are particularly important for any patterns that have been widely -// documented in books, tutorials, etc. Users will still be starting from -// these examples and we want to help them adopt the latest patterns rather -// than leave them stranded. -// ------------------------------------------------------------------------- - -// shimTraversalInString takes any arbitrary expression and checks if it is -// a quoted string in the native syntax. If it _is_, then it is parsed as a -// traversal and re-wrapped into a synthetic traversal expression and a -// warning is generated. Otherwise, the given expression is just returned -// verbatim. -// -// This function has no effect on expressions from the JSON syntax, since -// traversals in strings are the required pattern in that syntax. -// -// If wantKeyword is set, the generated warning diagnostic will talk about -// keywords rather than references. The behavior is otherwise unchanged, and -// the caller remains responsible for checking that the result is indeed -// a keyword, e.g. using hcl.ExprAsKeyword. -func shimTraversalInString(expr hcl.Expression, wantKeyword bool) (hcl.Expression, hcl.Diagnostics) { - // ObjectConsKeyExpr is a special wrapper type used for keys on object - // constructors to deal with the fact that naked identifiers are normally - // handled as "bareword" strings rather than as variable references. Since - // we know we're interpreting as a traversal anyway (and thus it won't - // matter whether it's a string or an identifier) we can safely just unwrap - // here and then process whatever we find inside as normal. - if ocke, ok := expr.(*hclsyntax.ObjectConsKeyExpr); ok { - expr = ocke.Wrapped - } - - if !exprIsNativeQuotedString(expr) { - return expr, nil - } - - strVal, diags := expr.Value(nil) - if diags.HasErrors() || strVal.IsNull() || !strVal.IsKnown() { - // Since we're not even able to attempt a shim here, we'll discard - // the diagnostics we saw so far and let the caller's own error - // handling take care of reporting the invalid expression. - return expr, nil - } - - // The position handling here isn't _quite_ right because it won't - // take into account any escape sequences in the literal string, but - // it should be close enough for any error reporting to make sense. - srcRange := expr.Range() - startPos := srcRange.Start // copy - startPos.Column++ // skip initial quote - startPos.Byte++ // skip initial quote - - traversal, tDiags := hclsyntax.ParseTraversalAbs( - []byte(strVal.AsString()), - srcRange.Filename, - startPos, - ) - diags = append(diags, tDiags...) - - if wantKeyword { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Quoted keywords are deprecated", - Detail: "In this context, keywords are expected literally rather than in quotes. Terraform 0.11 and earlier required quotes, but quoted keywords are now deprecated and will be removed in a future version of Terraform. Remove the quotes surrounding this keyword to silence this warning.", - Subject: &srcRange, - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Quoted references are deprecated", - Detail: "In this context, references are expected literally rather than in quotes. 
Terraform 0.11 and earlier required quotes, but quoted references are now deprecated and will be removed in a future version of Terraform. Remove the quotes surrounding this reference to silence this warning.",
-			Subject:  &srcRange,
-		})
-	}
-
-	return &hclsyntax.ScopeTraversalExpr{
-		Traversal: traversal,
-		SrcRange:  srcRange,
-	}, diags
-}
-
-// shimIsIgnoreChangesStar returns true if the given expression seems to be
-// a string literal whose value is "*". This is used to support a legacy
-// form of ignore_changes = all .
-//
-// This function does not itself emit any diagnostics, so it's the caller's
-// responsibility to emit a warning diagnostic when this function returns true.
-func shimIsIgnoreChangesStar(expr hcl.Expression) bool {
-	val, valDiags := expr.Value(nil)
-	if valDiags.HasErrors() {
-		return false
-	}
-	if val.Type() != cty.String || val.IsNull() || !val.IsKnown() {
-		return false
-	}
-	return val.AsString() == "*"
-}
-
-// warnForDeprecatedInterpolationsInBody returns warning diagnostics if the
-// given body can be proven to contain attributes whose expressions are native
-// syntax expressions consisting entirely of a single template interpolation,
-// which is a deprecated way to include a non-literal value in configuration.
-//
-// This is a best-effort sort of thing which relies on the physical HCL native
-// syntax AST, so it might not catch everything. The main goal is to catch the
-// "obvious" cases in order to help spread awareness that this old form is
-// deprecated, when folks copy it from older examples they've found on the
-// internet that were written for Terraform 0.11 or earlier.
-func warnForDeprecatedInterpolationsInBody(body hcl.Body) hcl.Diagnostics {
-	var diags hcl.Diagnostics
-
-	nativeBody, ok := body.(*hclsyntax.Body)
-	if !ok {
-		// If it's not native syntax then we've nothing to do here.
-		return diags
-	}
-
-	for _, attr := range nativeBody.Attributes {
-		moreDiags := warnForDeprecatedInterpolationsInExpr(attr.Expr)
-		diags = append(diags, moreDiags...)
-	}
-
-	for _, block := range nativeBody.Blocks {
-		// We'll also go hunting in nested blocks
-		moreDiags := warnForDeprecatedInterpolationsInBody(block.Body)
-		diags = append(diags, moreDiags...)
-	}
-
-	return diags
-}
-
-func warnForDeprecatedInterpolationsInExpr(expr hcl.Expression) hcl.Diagnostics {
-	var diags hcl.Diagnostics
-
-	if _, ok := expr.(*hclsyntax.TemplateWrapExpr); !ok {
-		// We're only interested in TemplateWrapExpr, because that's how
-		// the HCL native syntax parser represents the case of a template
-		// that consists entirely of a single interpolation expression, which
-		// is therefore subject to the special case of passing through the
-		// inner value without conversion to string.
-		return diags
-	}
-
-	diags = append(diags, &hcl.Diagnostic{
-		Severity: hcl.DiagWarning,
-		Summary:  "Interpolation-only expressions are deprecated",
-		Detail:   "Terraform 0.11 and earlier required all non-constant expressions to be provided via interpolation syntax, but this pattern is now deprecated. To silence this warning, remove the \"${ sequence from the start and the }\" sequence from the end of this expression, leaving just the inner expression.\n\nTemplate interpolation syntax is still used to construct strings from expressions when the template includes multiple interpolation sequences or a mixture of literal strings and interpolations. 
This deprecation applies only to templates that consist entirely of a single interpolation sequence.",
-		Subject: expr.Range().Ptr(),
-	})
-
-	return diags
-}
diff --git a/vendor/github.com/hashicorp/terraform/configs/config.go b/vendor/github.com/hashicorp/terraform/configs/config.go
deleted file mode 100644
index a99e90ca..00000000
--- a/vendor/github.com/hashicorp/terraform/configs/config.go
+++ /dev/null
@@ -1,348 +0,0 @@
-package configs
-
-import (
-	"fmt"
-	"sort"
-
-	version "github.com/hashicorp/go-version"
-	"github.com/hashicorp/hcl/v2"
-	"github.com/hashicorp/terraform/addrs"
-	"github.com/hashicorp/terraform/internal/getproviders"
-)
-
-// A Config is a node in the tree of modules within a configuration.
-//
-// The module tree is constructed by following ModuleCall instances recursively
-// through the root module transitively into descendent modules.
-//
-// A module tree described in *this* package represents the static tree
-// represented by configuration. During evaluation a static ModuleNode may
-// expand into zero or more module instances depending on the use of count and
-// for_each configuration attributes within each call.
-type Config struct {
-	// Root points to the Config for the root module within the same
-	// module tree as this module. If this module _is_ the root module then
-	// this is self-referential.
-	Root *Config
-
-	// Parent points to the Config for the module that directly calls
-	// this module. If this is the root module then this field is nil.
-	Parent *Config
-
-	// Path is a sequence of module logical names that traverse from the root
-	// module to this config. Path is empty for the root module.
-	//
-	// This should only be used to display paths to the end-user in rare cases
-	// where we are talking about the static module tree, before module calls
-	// have been resolved. In most cases, an addrs.ModuleInstance describing
-	// a node in the dynamic module tree is better, since it will then include
-	// any keys resulting from evaluating "count" and "for_each" arguments.
-	Path addrs.Module
-
-	// Children points to the Config for each of the direct child modules
-	// called from this module. The keys in this map match the keys in
-	// Module.ModuleCalls.
-	Children map[string]*Config
-
-	// Module points to the object describing the configuration for the
-	// various elements (variables, resources, etc) defined by this module.
-	Module *Module
-
-	// CallRange is the source range for the header of the module block that
-	// requested this module.
-	//
-	// This field is meaningless for the root module, where its contents are undefined.
-	CallRange hcl.Range
-
-	// SourceAddr is the source address that the referenced module was requested
-	// from, as specified in configuration.
-	//
-	// This field is meaningless for the root module, where its contents are undefined.
-	SourceAddr string
-
-	// SourceAddrRange is the location in the configuration source where the
-	// SourceAddr value was set, for use in diagnostic messages.
-	//
-	// This field is meaningless for the root module, where its contents are undefined.
-	SourceAddrRange hcl.Range
-
-	// Version is the specific version that was selected for this module,
-	// based on version constraints given in configuration.
-	//
-	// This field is nil if the module was loaded from a non-registry source,
-	// since versions are not supported for other sources.
-	//
-	// This field is meaningless for the root module, where it will always
-	// be nil. 
-	Version *version.Version
-}
-
-// NewEmptyConfig constructs a single-node configuration tree with an empty
-// root module. This is generally a pretty useless thing to do, so most callers
-// should instead use BuildConfig.
-func NewEmptyConfig() *Config {
-	ret := &Config{}
-	ret.Root = ret
-	ret.Children = make(map[string]*Config)
-	ret.Module = &Module{}
-	return ret
-}
-
-// Depth returns the number of "hops" the receiver is from the root of its
-// module tree, with the root module having a depth of zero.
-func (c *Config) Depth() int {
-	ret := 0
-	this := c
-	for this.Parent != nil {
-		ret++
-		this = this.Parent
-	}
-	return ret
-}
-
-// DeepEach calls the given function once for each module in the tree, starting
-// with the receiver.
-//
-// A parent is always called before its children and children of a particular
-// node are visited in lexicographic order by their names.
-func (c *Config) DeepEach(cb func(c *Config)) {
-	cb(c)
-
-	names := make([]string, 0, len(c.Children))
-	for name := range c.Children {
-		names = append(names, name)
-	}
-	// Sort so that the iteration order matches the lexicographic order
-	// promised by the doc comment above; plain map iteration is randomized.
-	sort.Strings(names)
-
-	for _, name := range names {
-		c.Children[name].DeepEach(cb)
-	}
-}
-
-// AllModules returns a slice containing the receiver and all of its
-// descendent nodes in the module tree, in the same order they would be
-// visited by DeepEach.
-func (c *Config) AllModules() []*Config {
-	var ret []*Config
-	c.DeepEach(func(c *Config) {
-		ret = append(ret, c)
-	})
-	return ret
-}
-
-// Descendent returns the descendent config that has the given path beneath
-// the receiver, or nil if there is no such module.
-//
-// The path traverses the static module tree, prior to any expansion to handle
-// count and for_each arguments.
-//
-// An empty path will just return the receiver, and is therefore pointless.
-func (c *Config) Descendent(path addrs.Module) *Config {
-	current := c
-	for _, name := range path {
-		current = current.Children[name]
-		if current == nil {
-			return nil
-		}
-	}
-	return current
-}
-
-// DescendentForInstance is like Descendent except that it accepts a path
-// to a particular module instance in the dynamic module graph, returning
-// the node from the static module graph that corresponds to it.
-//
-// All instances created by a particular module call share the same
-// configuration, so the keys within the given path are disregarded.
-func (c *Config) DescendentForInstance(path addrs.ModuleInstance) *Config {
-	current := c
-	for _, step := range path {
-		current = current.Children[step.Name]
-		if current == nil {
-			return nil
-		}
-	}
-	return current
-}
-
-// ProviderRequirements searches the full tree of modules under the receiver
-// for both explicit and implicit dependencies on providers.
-//
-// The result is a full manifest of all of the providers that must be available
-// in order to work with the receiving configuration.
-//
-// If the returned diagnostics includes errors then the resulting Requirements
-// may be incomplete.
-func (c *Config) ProviderRequirements() (getproviders.Requirements, hcl.Diagnostics) {
-	reqs := make(getproviders.Requirements)
-	diags := c.addProviderRequirements(reqs)
-	return reqs, diags
-}
-
-// addProviderRequirements is the main part of the ProviderRequirements
-// implementation, gradually mutating a shared requirements object to
-// eventually return.
-func (c *Config) addProviderRequirements(reqs getproviders.Requirements) hcl.Diagnostics {
-	var diags hcl.Diagnostics
-
-	// First we'll deal with the requirements directly in _our_ module... 
- for _, providerReqs := range c.Module.ProviderRequirements.RequiredProviders { - fqn := providerReqs.Type - if _, ok := reqs[fqn]; !ok { - // We'll at least have an unconstrained dependency then, but might - // add to this in the loop below. - reqs[fqn] = nil - } - // The model of version constraints in this package is still the - // old one using a different upstream module to represent versions, - // so we'll need to shim that out here for now. We assume this - // will always succeed because these constraints already succeeded - // parsing with the other constraint parser, which uses the same - // syntax. - constraints := getproviders.MustParseVersionConstraints(providerReqs.Requirement.Required.String()) - reqs[fqn] = append(reqs[fqn], constraints...) - } - // Each resource in the configuration creates an *implicit* provider - // dependency, though we'll only record it if there isn't already - // an explicit dependency on the same provider. - for _, rc := range c.Module.ManagedResources { - fqn := rc.Provider - if _, exists := reqs[fqn]; exists { - // Explicit dependency already present - continue - } - reqs[fqn] = nil - } - for _, rc := range c.Module.DataResources { - fqn := rc.Provider - if _, exists := reqs[fqn]; exists { - // Explicit dependency already present - continue - } - reqs[fqn] = nil - } - - // "provider" block can also contain version constraints - for _, provider := range c.Module.ProviderConfigs { - fqn := c.Module.ProviderForLocalConfig(addrs.LocalProviderConfig{LocalName: provider.Name}) - if _, ok := reqs[fqn]; !ok { - // We'll at least have an unconstrained dependency then, but might - // add to this in the loop below. - reqs[fqn] = nil - } - if provider.Version.Required != nil { - constraints := getproviders.MustParseVersionConstraints(provider.Version.Required.String()) - reqs[fqn] = append(reqs[fqn], constraints...) - } - } - - // ...and now we'll recursively visit all of the child modules to merge - // in their requirements too. - for _, childConfig := range c.Children { - moreDiags := childConfig.addProviderRequirements(reqs) - diags = append(diags, moreDiags...) - } - - return diags -} - -// ProviderTypes returns the FQNs of each distinct provider type referenced -// in the receiving configuration. -// -// This is a helper for easily determining which provider types are required -// to fully interpret the configuration, though it does not include version -// information and so callers are expected to have already dealt with -// provider version selection in an earlier step and have identified suitable -// versions for each provider. 
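A brief sketch of how ProviderRequirements and ProviderTypes fit together; cfg is assumed to be a *Config produced by BuildConfig:

	reqs, diags := cfg.ProviderRequirements()
	if diags.HasErrors() {
		// surface the diagnostics rather than trusting an incomplete manifest
	}
	for _, p := range cfg.ProviderTypes() {
		// reqs[p] holds the merged version constraints for provider p;
		// a nil entry means the dependency is unconstrained.
		fmt.Printf("%s: %d constraint(s)\n", p.String(), len(reqs[p]))
	}
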
-func (c *Config) ProviderTypes() []addrs.Provider { - m := make(map[addrs.Provider]struct{}) - c.gatherProviderTypes(m) - - ret := make([]addrs.Provider, 0, len(m)) - for k := range m { - ret = append(ret, k) - } - sort.Slice(ret, func(i, j int) bool { - return ret[i].String() < ret[j].String() - }) - return ret -} - -func (c *Config) gatherProviderTypes(m map[addrs.Provider]struct{}) { - if c == nil { - return - } - - for _, pc := range c.Module.ProviderConfigs { - fqn := c.Module.ProviderForLocalConfig(addrs.LocalProviderConfig{LocalName: pc.Name}) - m[fqn] = struct{}{} - } - for _, rc := range c.Module.ManagedResources { - providerAddr := rc.ProviderConfigAddr() - fqn := c.Module.ProviderForLocalConfig(providerAddr) - m[fqn] = struct{}{} - } - for _, rc := range c.Module.DataResources { - providerAddr := rc.ProviderConfigAddr() - fqn := c.Module.ProviderForLocalConfig(providerAddr) - m[fqn] = struct{}{} - } - - // Must also visit our child modules, recursively. - for _, cc := range c.Children { - cc.gatherProviderTypes(m) - } -} - -// ResolveAbsProviderAddr returns the AbsProviderConfig represented by the given -// ProviderConfig address, which must not be nil or this method will panic. -// -// If the given address is already an AbsProviderConfig then this method returns -// it verbatim, and will always succeed. If it's a LocalProviderConfig then -// it will consult the local-to-FQN mapping table for the given module -// to find the absolute address corresponding to the given local one. -// -// The module address to resolve local addresses in must be given in the second -// argument, and must refer to a module that exists under the receiver or -// else this method will panic. -func (c *Config) ResolveAbsProviderAddr(addr addrs.ProviderConfig, inModule addrs.Module) addrs.AbsProviderConfig { - switch addr := addr.(type) { - - case addrs.AbsProviderConfig: - return addr - - case addrs.LocalProviderConfig: - // Find the descendent Config that contains the module that this - // local config belongs to. - mc := c.Descendent(inModule) - if mc == nil { - panic(fmt.Sprintf("ResolveAbsProviderAddr with non-existent module %s", inModule.String())) - } - - var provider addrs.Provider - if providerReq, exists := c.Module.ProviderRequirements.RequiredProviders[addr.LocalName]; exists { - provider = providerReq.Type - } else { - provider = addrs.ImpliedProviderForUnqualifiedType(addr.LocalName) - } - - return addrs.AbsProviderConfig{ - Module: inModule, - Provider: provider, - Alias: addr.Alias, - } - - default: - panic(fmt.Sprintf("cannot ResolveAbsProviderAddr(%v, ...)", addr)) - } - -} - -// ProviderForConfigAddr returns the FQN for a given addrs.ProviderConfig, first -// by checking for the provider in module.ProviderRequirements and falling -// back to addrs.NewDefaultProvider if it is not found. 
-func (c *Config) ProviderForConfigAddr(addr addrs.LocalProviderConfig) addrs.Provider { - if provider, exists := c.Module.ProviderRequirements.RequiredProviders[addr.LocalName]; exists { - return provider.Type - } - return c.ResolveAbsProviderAddr(addr, addrs.RootModule).Provider -} diff --git a/vendor/github.com/hashicorp/terraform/configs/config_build.go b/vendor/github.com/hashicorp/terraform/configs/config_build.go deleted file mode 100644 index c38a6792..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/config_build.go +++ /dev/null @@ -1,180 +0,0 @@ -package configs - -import ( - "sort" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" -) - -// BuildConfig constructs a Config from a root module by loading all of its -// descendent modules via the given ModuleWalker. -// -// The result is a module tree that has so far only had basic module- and -// file-level invariants validated. If the returned diagnostics contains errors, -// the returned module tree may be incomplete but can still be used carefully -// for static analysis. -func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) { - var diags hcl.Diagnostics - cfg := &Config{ - Module: root, - } - cfg.Root = cfg // Root module is self-referential. - cfg.Children, diags = buildChildModules(cfg, walker) - return cfg, diags -} - -func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, hcl.Diagnostics) { - var diags hcl.Diagnostics - ret := map[string]*Config{} - - calls := parent.Module.ModuleCalls - - // We'll sort the calls by their local names so that they'll appear in a - // predictable order in any logging that's produced during the walk. - callNames := make([]string, 0, len(calls)) - for k := range calls { - callNames = append(callNames, k) - } - sort.Strings(callNames) - - for _, callName := range callNames { - call := calls[callName] - path := make([]string, len(parent.Path)+1) - copy(path, parent.Path) - path[len(path)-1] = call.Name - - req := ModuleRequest{ - Name: call.Name, - Path: path, - SourceAddr: call.SourceAddr, - SourceAddrRange: call.SourceAddrRange, - VersionConstraint: call.Version, - Parent: parent, - CallRange: call.DeclRange, - } - - mod, ver, modDiags := walker.LoadModule(&req) - diags = append(diags, modDiags...) - if mod == nil { - // nil can be returned if the source address was invalid and so - // nothing could be loaded whatsoever. LoadModule should've - // returned at least one error diagnostic in that case. - continue - } - - child := &Config{ - Parent: parent, - Root: parent.Root, - Path: path, - Module: mod, - CallRange: call.DeclRange, - SourceAddr: call.SourceAddr, - SourceAddrRange: call.SourceAddrRange, - Version: ver, - } - - child.Children, modDiags = buildChildModules(child, walker) - diags = append(diags, modDiags...) - - ret[call.Name] = child - } - - return ret, diags -} - -// A ModuleWalker knows how to find and load a child module given details about -// the module to be loaded and a reference to its partially-loaded parent -// Config. -type ModuleWalker interface { - // LoadModule finds and loads a requested child module. - // - // If errors are detected during loading, implementations should return them - // in the diagnostics object. If the diagnostics object contains any errors - // then the caller will tolerate the returned module being nil or incomplete. - // If no errors are returned, it should be non-nil and complete. 
- // - // Full validation need not have been performed but an implementation should - // ensure that the basic file- and module-validations performed by the - // LoadConfigDir function (valid syntax, no namespace collisions, etc) have - // been performed before returning a module. - LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) -} - -// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps -// a callback function, for more convenient use of that interface. -type ModuleWalkerFunc func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) - -// LoadModule implements ModuleWalker. -func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { - return f(req) -} - -// ModuleRequest is used with the ModuleWalker interface to describe a child -// module that must be loaded. -type ModuleRequest struct { - // Name is the "logical name" of the module call within configuration. - // This is provided in case the name is used as part of a storage key - // for the module, but implementations must otherwise treat it as an - // opaque string. It is guaranteed to have already been validated as an - // HCL identifier and UTF-8 encoded. - Name string - - // Path is a list of logical names that traverse from the root module to - // this module. This can be used, for example, to form a lookup key for - // each distinct module call in a configuration, allowing for multiple - // calls with the same name at different points in the tree. - Path addrs.Module - - // SourceAddr is the source address string provided by the user in - // configuration. - SourceAddr string - - // SourceAddrRange is the source range for the SourceAddr value as it - // was provided in configuration. This can and should be used to generate - // diagnostics about the source address having invalid syntax, referring - // to a non-existent object, etc. - SourceAddrRange hcl.Range - - // VersionConstraint is the version constraint applied to the module in - // configuration. This data structure includes the source range for - // the constraint, which can and should be used to generate diagnostics - // about constraint-related issues, such as constraints that eliminate all - // available versions of a module whose source is otherwise valid. - VersionConstraint VersionConstraint - - // Parent is the partially-constructed module tree node that the loaded - // module will be added to. Callers may refer to any field of this - // structure except Children, which is still under construction when - // ModuleRequest objects are created and thus has undefined content. - // The main reason this is provided is so that full module paths can - // be constructed for uniqueness. - Parent *Config - - // CallRange is the source range for the header of the "module" block - // in configuration that prompted this request. This can be used as the - // subject of an error diagnostic that relates to the module call itself, - // rather than to either its source address or its version number. - CallRange hcl.Range -} - -// DisabledModuleWalker is a ModuleWalker that doesn't support -// child modules at all, and so will return an error if asked to load one. -// -// This is provided primarily for testing. There is no good reason to use this -// in the main application. 
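For contrast with DisabledModuleWalker below, a sketch of a permissive walker; loadModuleDir is a hypothetical helper standing in for whatever module installation logic the host application provides, and rootModule is assumed to be a *Module from LoadConfigDir:

	walker := ModuleWalkerFunc(func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) {
		mod, diags := loadModuleDir(req.SourceAddr) // hypothetical loader
		return mod, nil, diags                      // nil version: non-registry source
	})
	cfg, diags := BuildConfig(rootModule, walker)
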
-var DisabledModuleWalker ModuleWalker
-
-func init() {
-	DisabledModuleWalker = ModuleWalkerFunc(func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) {
-		return nil, nil, hcl.Diagnostics{
-			{
-				Severity: hcl.DiagError,
-				Summary:  "Child modules are not supported",
-				Detail:   "Child module calls are not allowed in this context.",
-				Subject:  &req.CallRange,
-			},
-		}
-	})
-}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go b/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go
deleted file mode 100644
index 41a53374..00000000
--- a/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go
+++ /dev/null
@@ -1,250 +0,0 @@
-package configschema
-
-import (
-	"fmt"
-
-	"github.com/zclconf/go-cty/cty"
-	"github.com/zclconf/go-cty/cty/convert"
-)
-
-// CoerceValue attempts to force the given value to conform to the type
-// implied by the receiver.
-//
-// This is useful in situations where a configuration must be derived from
-// an already-decoded value. It is always better to decode directly from
-// configuration where possible since then source location information is
-// still available to produce diagnostics, but in special situations this
-// function allows a compatible result to be obtained even if the
-// configuration objects are not available.
-//
-// If the given value cannot be converted to conform to the receiving schema
-// then an error is returned describing one of possibly many problems. This
-// error may be a cty.PathError indicating a position within the nested
-// data structure where the problem applies.
-func (b *Block) CoerceValue(in cty.Value) (cty.Value, error) {
-	var path cty.Path
-	return b.coerceValue(in, path)
-}
-
-func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) {
-	switch {
-	case in.IsNull():
-		return cty.NullVal(b.ImpliedType()), nil
-	case !in.IsKnown():
-		return cty.UnknownVal(b.ImpliedType()), nil
-	}
-
-	ty := in.Type()
-	if !ty.IsObjectType() {
-		return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("an object is required")
-	}
-
-	for name := range ty.AttributeTypes() {
-		if _, defined := b.Attributes[name]; defined {
-			continue
-		}
-		if _, defined := b.BlockTypes[name]; defined {
-			continue
-		}
-		return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("unexpected attribute %q", name)
-	}
-
-	attrs := make(map[string]cty.Value)
-
-	for name, attrS := range b.Attributes {
-		var val cty.Value
-		switch {
-		case ty.HasAttribute(name):
-			val = in.GetAttr(name)
-		case attrS.Computed || attrS.Optional:
-			val = cty.NullVal(attrS.Type)
-		default:
-			return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", name)
-		}
-
-		val, err := attrS.coerceValue(val, append(path, cty.GetAttrStep{Name: name}))
-		if err != nil {
-			return cty.UnknownVal(b.ImpliedType()), err
-		}
-
-		attrs[name] = val
-	}
-	for typeName, blockS := range b.BlockTypes {
-		switch blockS.Nesting {
-
-		case NestingSingle, NestingGroup:
-			switch {
-			case ty.HasAttribute(typeName):
-				var err error
-				val := in.GetAttr(typeName)
-				attrs[typeName], err = blockS.coerceValue(val, append(path, cty.GetAttrStep{Name: typeName}))
-				if err != nil {
-					return cty.UnknownVal(b.ImpliedType()), err
-				}
-			default:
-				attrs[typeName] = blockS.EmptyValue()
-			}
-
-		case NestingList:
-			switch {
-			case ty.HasAttribute(typeName):
-				coll := in.GetAttr(typeName)
-
-				switch {
-				case coll.IsNull():
-					attrs[typeName] = cty.NullVal(cty.List(blockS.ImpliedType()))
-					continue
-				case 
!coll.IsKnown(): - attrs[typeName] = cty.UnknownVal(cty.List(blockS.ImpliedType())) - continue - } - - if !coll.CanIterateElements() { - return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a list") - } - l := coll.LengthInt() - - if l == 0 { - attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) - continue - } - elems := make([]cty.Value, 0, l) - { - path = append(path, cty.GetAttrStep{Name: typeName}) - for it := coll.ElementIterator(); it.Next(); { - var err error - idx, val := it.Element() - val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) - if err != nil { - return cty.UnknownVal(b.ImpliedType()), err - } - elems = append(elems, val) - } - } - attrs[typeName] = cty.ListVal(elems) - default: - attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) - } - - case NestingSet: - switch { - case ty.HasAttribute(typeName): - coll := in.GetAttr(typeName) - - switch { - case coll.IsNull(): - attrs[typeName] = cty.NullVal(cty.Set(blockS.ImpliedType())) - continue - case !coll.IsKnown(): - attrs[typeName] = cty.UnknownVal(cty.Set(blockS.ImpliedType())) - continue - } - - if !coll.CanIterateElements() { - return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a set") - } - l := coll.LengthInt() - - if l == 0 { - attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) - continue - } - elems := make([]cty.Value, 0, l) - { - path = append(path, cty.GetAttrStep{Name: typeName}) - for it := coll.ElementIterator(); it.Next(); { - var err error - idx, val := it.Element() - val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) - if err != nil { - return cty.UnknownVal(b.ImpliedType()), err - } - elems = append(elems, val) - } - } - attrs[typeName] = cty.SetVal(elems) - default: - attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) - } - - case NestingMap: - switch { - case ty.HasAttribute(typeName): - coll := in.GetAttr(typeName) - - switch { - case coll.IsNull(): - attrs[typeName] = cty.NullVal(cty.Map(blockS.ImpliedType())) - continue - case !coll.IsKnown(): - attrs[typeName] = cty.UnknownVal(cty.Map(blockS.ImpliedType())) - continue - } - - if !coll.CanIterateElements() { - return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") - } - l := coll.LengthInt() - if l == 0 { - attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) - continue - } - elems := make(map[string]cty.Value) - { - path = append(path, cty.GetAttrStep{Name: typeName}) - for it := coll.ElementIterator(); it.Next(); { - var err error - key, val := it.Element() - if key.Type() != cty.String || key.IsNull() || !key.IsKnown() { - return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") - } - val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: key})) - if err != nil { - return cty.UnknownVal(b.ImpliedType()), err - } - elems[key.AsString()] = val - } - } - - // If the attribute values here contain any DynamicPseudoTypes, - // the concrete type must be an object. 
- useObject := false - switch { - case coll.Type().IsObjectType(): - useObject = true - default: - // It's possible that we were given a map, and need to coerce it to an object - ety := coll.Type().ElementType() - for _, v := range elems { - if !v.Type().Equals(ety) { - useObject = true - break - } - } - } - - if useObject { - attrs[typeName] = cty.ObjectVal(elems) - } else { - attrs[typeName] = cty.MapVal(elems) - } - default: - attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) - } - - default: - // should never happen because above is exhaustive - panic(fmt.Errorf("unsupported nesting mode %#v", blockS.Nesting)) - } - } - - return cty.ObjectVal(attrs), nil -} - -func (a *Attribute) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { - val, err := convert.Convert(in, a.Type) - if err != nil { - return cty.UnknownVal(a.Type), path.NewError(err) - } - return val, nil -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go deleted file mode 100644 index 2c21ca5e..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go +++ /dev/null @@ -1,123 +0,0 @@ -package configschema - -import ( - "github.com/hashicorp/hcl/v2/hcldec" -) - -var mapLabelNames = []string{"key"} - -// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body -// using the facilities in the hcldec package. -// -// The returned specification is guaranteed to return a value of the same type -// returned by method ImpliedType, but it may contain null values if any of the -// block attributes are defined as optional and/or computed respectively. -func (b *Block) DecoderSpec() hcldec.Spec { - ret := hcldec.ObjectSpec{} - if b == nil { - return ret - } - - for name, attrS := range b.Attributes { - ret[name] = attrS.decoderSpec(name) - } - - for name, blockS := range b.BlockTypes { - if _, exists := ret[name]; exists { - // This indicates an invalid schema, since it's not valid to - // define both an attribute and a block type of the same name. - // However, we don't raise this here since it's checked by - // InternalValidate. - continue - } - - childSpec := blockS.Block.DecoderSpec() - - // We can only validate 0 or 1 for MinItems, because a dynamic block - // may satisfy any number of min items while only having a single - // block in the config. We cannot validate MaxItems because a - // configuration may have any number of dynamic blocks - minItems := 0 - if blockS.MinItems > 1 { - minItems = 1 - } - - switch blockS.Nesting { - case NestingSingle, NestingGroup: - ret[name] = &hcldec.BlockSpec{ - TypeName: name, - Nested: childSpec, - Required: blockS.MinItems == 1, - } - if blockS.Nesting == NestingGroup { - ret[name] = &hcldec.DefaultSpec{ - Primary: ret[name], - Default: &hcldec.LiteralSpec{ - Value: blockS.EmptyValue(), - }, - } - } - case NestingList: - // We prefer to use a list where possible, since it makes our - // implied type more complete, but if there are any - // dynamically-typed attributes inside we must use a tuple - // instead, at the expense of our type then not being predictable. 
-			if blockS.Block.ImpliedType().HasDynamicTypes() {
-				ret[name] = &hcldec.BlockTupleSpec{
-					TypeName: name,
-					Nested:   childSpec,
-					MinItems: minItems,
-				}
-			} else {
-				ret[name] = &hcldec.BlockListSpec{
-					TypeName: name,
-					Nested:   childSpec,
-					MinItems: minItems,
-				}
-			}
-		case NestingSet:
-			// We forbid dynamically-typed attributes inside NestingSet in
-			// InternalValidate, so we don't do anything special to handle
-			// that here. (There is no set analog to tuple and object types,
-			// because cty's set implementation depends on knowing the static
-			// type in order to properly compute its internal hashes.)
-			ret[name] = &hcldec.BlockSetSpec{
-				TypeName: name,
-				Nested:   childSpec,
-				MinItems: minItems,
-			}
-		case NestingMap:
-			// We prefer to use a map where possible, since it makes our
-			// implied type more complete, but if there are any
-			// dynamically-typed attributes inside we must use an object
-			// instead, at the expense of our type then not being predictable.
-			if blockS.Block.ImpliedType().HasDynamicTypes() {
-				ret[name] = &hcldec.BlockObjectSpec{
-					TypeName:   name,
-					Nested:     childSpec,
-					LabelNames: mapLabelNames,
-				}
-			} else {
-				ret[name] = &hcldec.BlockMapSpec{
-					TypeName:   name,
-					Nested:     childSpec,
-					LabelNames: mapLabelNames,
-				}
-			}
-		default:
-			// Invalid nesting type is just ignored. It's checked by
-			// InternalValidate.
-			continue
-		}
-	}
-
-	return ret
-}
-
-func (a *Attribute) decoderSpec(name string) hcldec.Spec {
-	return &hcldec.AttrSpec{
-		Name:     name,
-		Type:     a.Type,
-		Required: a.Required,
-	}
-}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/doc.go b/vendor/github.com/hashicorp/terraform/configs/configschema/doc.go
deleted file mode 100644
index caf8d730..00000000
--- a/vendor/github.com/hashicorp/terraform/configs/configschema/doc.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Package configschema contains types for describing the expected structure
-// of a configuration block whose shape is not known until runtime.
-//
-// For example, this is used to describe the expected contents of a resource
-// configuration block, which is defined by the corresponding provider plugin
-// and thus not compiled into Terraform core.
-//
-// A configschema primarily describes the shape of configuration, but it is
-// also suitable for use with other structures derived from the configuration,
-// such as the cached state of a resource or a resource diff.
-//
-// This package should not be confused with the package helper/schema, which
-// is the higher-level helper library used to implement providers themselves.
-package configschema
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go b/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go
deleted file mode 100644
index 005da56b..00000000
--- a/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package configschema
-
-import (
-	"github.com/zclconf/go-cty/cty"
-)
-
-// EmptyValue returns the "empty value" for the receiving block, which for
-// a block type is a non-null object where all of the attribute values are
-// the empty values of the block's attributes and nested block types.
-//
-// In other words, it returns the value that would be returned if an empty
-// block were decoded against the receiving schema, assuming that no required
-// attribute or block constraints were honored. 
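A concrete example of the behavior described above, using a hypothetical one-attribute schema:

	b := &Block{
		Attributes: map[string]*Attribute{
			"name": {Type: cty.String, Optional: true},
		},
	}
	v := b.EmptyValue()
	// v is cty.ObjectVal(map[string]cty.Value{"name": cty.NullVal(cty.String)})
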
-func (b *Block) EmptyValue() cty.Value { - vals := make(map[string]cty.Value) - for name, attrS := range b.Attributes { - vals[name] = attrS.EmptyValue() - } - for name, blockS := range b.BlockTypes { - vals[name] = blockS.EmptyValue() - } - return cty.ObjectVal(vals) -} - -// EmptyValue returns the "empty value" for the receiving attribute, which is -// the value that would be returned if there were no definition of the attribute -// at all, ignoring any required constraint. -func (a *Attribute) EmptyValue() cty.Value { - return cty.NullVal(a.Type) -} - -// EmptyValue returns the "empty value" for when there are zero nested blocks -// present of the receiving type. -func (b *NestedBlock) EmptyValue() cty.Value { - switch b.Nesting { - case NestingSingle: - return cty.NullVal(b.Block.ImpliedType()) - case NestingGroup: - return b.Block.EmptyValue() - case NestingList: - if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { - return cty.EmptyTupleVal - } else { - return cty.ListValEmpty(ty) - } - case NestingMap: - if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { - return cty.EmptyObjectVal - } else { - return cty.MapValEmpty(ty) - } - case NestingSet: - return cty.SetValEmpty(b.Block.ImpliedType()) - default: - // Should never get here because the above is intended to be exhaustive, - // but we'll be robust and return a result nonetheless. - return cty.NullVal(cty.DynamicPseudoType) - } -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go deleted file mode 100644 index a81b7eab..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go +++ /dev/null @@ -1,42 +0,0 @@ -package configschema - -import ( - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/zclconf/go-cty/cty" -) - -// ImpliedType returns the cty.Type that would result from decoding a -// configuration block using the receiving block schema. -// -// ImpliedType always returns a result, even if the given schema is -// inconsistent. Code that creates configschema.Block objects should be -// tested using the InternalValidate method to detect any inconsistencies -// that would cause this method to fall back on defaults and assumptions. -func (b *Block) ImpliedType() cty.Type { - if b == nil { - return cty.EmptyObject - } - - return hcldec.ImpliedType(b.DecoderSpec()) -} - -// ContainsSensitive returns true if any of the attributes of the receiving -// block or any of its descendent blocks are marked as sensitive. -// -// Blocks themselves cannot be sensitive as a whole -- sensitivity is a -// per-attribute idea -- but sometimes we want to include a whole object -// decoded from a block in some UI output, and that is safe to do only if -// none of the contained attributes are sensitive. 
-func (b *Block) ContainsSensitive() bool {
-	for _, attrS := range b.Attributes {
-		if attrS.Sensitive {
-			return true
-		}
-	}
-	for _, blockS := range b.BlockTypes {
-		if blockS.ContainsSensitive() {
-			return true
-		}
-	}
-	return false
-}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go b/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go
deleted file mode 100644
index ebf1abba..00000000
--- a/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package configschema
-
-import (
-	"fmt"
-	"regexp"
-
-	"github.com/zclconf/go-cty/cty"
-
-	multierror "github.com/hashicorp/go-multierror"
-)
-
-var validName = regexp.MustCompile(`^[a-z0-9_]+$`)
-
-// InternalValidate returns an error if the receiving block and its child
-// schema definitions have any inconsistencies with the documented rules for
-// valid schema.
-//
-// This is intended to be used within unit tests to detect when a given
-// schema is invalid.
-func (b *Block) InternalValidate() error {
-	if b == nil {
-		return fmt.Errorf("top-level block schema is nil")
-	}
-	return b.internalValidate("", nil)
-}
-
-func (b *Block) internalValidate(prefix string, err error) error {
-	for name, attrS := range b.Attributes {
-		if attrS == nil {
-			err = multierror.Append(err, fmt.Errorf("%s%s: attribute schema is nil", prefix, name))
-			continue
-		}
-		if !validName.MatchString(name) {
-			err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name))
-		}
-		if !attrS.Optional && !attrS.Required && !attrS.Computed {
-			err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name))
-		}
-		if attrS.Optional && attrS.Required {
-			err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Optional and Required", prefix, name))
-		}
-		if attrS.Computed && attrS.Required {
-			err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Computed and Required", prefix, name))
-		}
-		if attrS.Type == cty.NilType {
-			err = multierror.Append(err, fmt.Errorf("%s%s: Type must be set to something other than cty.NilType", prefix, name))
-		}
-	}
-
-	for name, blockS := range b.BlockTypes {
-		if blockS == nil {
-			err = multierror.Append(err, fmt.Errorf("%s%s: block schema is nil", prefix, name))
-			continue
-		}
-
-		if _, isAttr := b.Attributes[name]; isAttr {
-			err = multierror.Append(err, fmt.Errorf("%s%s: name defined as both attribute and child block type", prefix, name))
-		} else if !validName.MatchString(name) {
-			err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name))
-		}
-
-		if blockS.MinItems < 0 || blockS.MaxItems < 0 {
-			err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be greater than or equal to zero", prefix, name))
-		}
-
-		switch blockS.Nesting {
-		case NestingSingle:
-			switch {
-			case blockS.MinItems != blockS.MaxItems:
-				err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must match in NestingSingle mode", prefix, name))
-			case blockS.MinItems < 0 || blockS.MinItems > 1:
-				err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name))
-			}
-		case NestingGroup:
-			if blockS.MinItems != 0 || blockS.MaxItems != 0 {
-				err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems cannot be used in NestingGroup 
mode", prefix, name)) - } - case NestingList, NestingSet: - if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 { - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting)) - } - if blockS.Nesting == NestingSet { - ety := blockS.Block.ImpliedType() - if ety.HasDynamicTypes() { - // This is not permitted because the HCL (cty) set implementation - // needs to know the exact type of set elements in order to - // properly hash them, and so can't support mixed types. - err = multierror.Append(err, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name)) - } - } - case NestingMap: - if blockS.MinItems != 0 || blockS.MaxItems != 0 { - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name)) - } - default: - err = multierror.Append(err, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, blockS.Nesting)) - } - - subPrefix := prefix + name + "." - err = blockS.Block.internalValidate(subPrefix, err) - } - - return err -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go deleted file mode 100644 index febe743e..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by "stringer -type=NestingMode"; DO NOT EDIT. - -package configschema - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[nestingModeInvalid-0] - _ = x[NestingSingle-1] - _ = x[NestingGroup-2] - _ = x[NestingList-3] - _ = x[NestingSet-4] - _ = x[NestingMap-5] -} - -const _NestingMode_name = "nestingModeInvalidNestingSingleNestingGroupNestingListNestingSetNestingMap" - -var _NestingMode_index = [...]uint8{0, 18, 31, 43, 54, 64, 74} - -func (i NestingMode) String() string { - if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) { - return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go b/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go deleted file mode 100644 index 0be3b8fa..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go +++ /dev/null @@ -1,38 +0,0 @@ -package configschema - -// NoneRequired returns a deep copy of the receiver with any required -// attributes translated to optional. 
-func (b *Block) NoneRequired() *Block { - ret := &Block{} - - if b.Attributes != nil { - ret.Attributes = make(map[string]*Attribute, len(b.Attributes)) - } - for name, attrS := range b.Attributes { - ret.Attributes[name] = attrS.forceOptional() - } - - if b.BlockTypes != nil { - ret.BlockTypes = make(map[string]*NestedBlock, len(b.BlockTypes)) - } - for name, blockS := range b.BlockTypes { - ret.BlockTypes[name] = blockS.noneRequired() - } - - return ret -} - -func (b *NestedBlock) noneRequired() *NestedBlock { - ret := *b - ret.Block = *(ret.Block.NoneRequired()) - ret.MinItems = 0 - ret.MaxItems = 0 - return &ret -} - -func (a *Attribute) forceOptional() *Attribute { - ret := *a - ret.Optional = true - ret.Required = false - return &ret -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go b/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go deleted file mode 100644 index 4d3e7cab..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go +++ /dev/null @@ -1,145 +0,0 @@ -package configschema - -import ( - "github.com/zclconf/go-cty/cty" -) - -type StringKind int - -const ( - StringPlain StringKind = iota - StringMarkdown -) - -// Block represents a configuration block. -// -// "Block" here is a logical grouping construct, though it happens to map -// directly onto the physical block syntax of Terraform's native configuration -// syntax. It may be a more a matter of convention in other syntaxes, such as -// JSON. -// -// When converted to a value, a Block always becomes an instance of an object -// type derived from its defined attributes and nested blocks -type Block struct { - // Attributes describes any attributes that may appear directly inside - // the block. - Attributes map[string]*Attribute - - // BlockTypes describes any nested block types that may appear directly - // inside the block. - BlockTypes map[string]*NestedBlock - - Description string - DescriptionKind StringKind - - Deprecated bool -} - -// Attribute represents a configuration attribute, within a block. -type Attribute struct { - // Type is a type specification that the attribute's value must conform to. - Type cty.Type - - // Description is an English-language description of the purpose and - // usage of the attribute. A description should be concise and use only - // one or two sentences, leaving full definition to longer-form - // documentation defined elsewhere. - Description string - DescriptionKind StringKind - - // Required, if set to true, specifies that an omitted or null value is - // not permitted. - Required bool - - // Optional, if set to true, specifies that an omitted or null value is - // permitted. This field conflicts with Required. - Optional bool - - // Computed, if set to true, specifies that the value comes from the - // provider rather than from configuration. If combined with Optional, - // then the config may optionally provide an overridden value. - Computed bool - - // Sensitive, if set to true, indicates that an attribute may contain - // sensitive information. - // - // At present nothing is done with this information, but callers are - // encouraged to set it where appropriate so that it may be used in the - // future to help Terraform mask sensitive information. (Terraform - // currently achieves this in a limited sense via other mechanisms.) - Sensitive bool - - Deprecated bool -} - -// NestedBlock represents the embedding of one block within another. 
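The NoneRequired and forceOptional helpers above never mutate the receiver; they rebuild the maps and copy each attribute by value before relaxing it. A stripped-down sketch of that copy-then-relax pattern (the attr and block types here are simplified stand-ins, not the real configschema structs):

```go
package main

import "fmt"

// attr is a stripped-down stand-in for configschema.Attribute.
type attr struct {
	Required bool
	Optional bool
}

// block is a stripped-down stand-in for configschema.Block.
type block struct {
	Attributes map[string]*attr
}

// noneRequired mirrors the pattern above: each attribute is shallow-copied
// with Required cleared and Optional set, leaving the receiver untouched.
func (b *block) noneRequired() *block {
	ret := &block{Attributes: make(map[string]*attr, len(b.Attributes))}
	for name, a := range b.Attributes {
		cp := *a // copy the value, not the pointer, so the original survives
		cp.Required = false
		cp.Optional = true
		ret.Attributes[name] = &cp
	}
	return ret
}

func main() {
	orig := &block{Attributes: map[string]*attr{"name": {Required: true}}}
	relaxed := orig.noneRequired()
	fmt.Println(orig.Attributes["name"].Required, relaxed.Attributes["name"].Required) // true false
}
```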
-type NestedBlock struct { - // Block is the description of the block that's nested. - Block - - // Nesting provides the nesting mode for the child block, which determines - // how many instances of the block are allowed, how many labels it expects, - // and how the resulting data will be converted into a data structure. - Nesting NestingMode - - // MinItems and MaxItems set, for the NestingList and NestingSet nesting - // modes, lower and upper limits on the number of child blocks allowed - // of the given type. If both are left at zero, no limit is applied. - // - // As a special case, both values can be set to 1 for NestingSingle in - // order to indicate that a particular single block is required. - // - // These fields are ignored for other nesting modes and must both be left - // at zero. - MinItems, MaxItems int -} - -// NestingMode is an enumeration of modes for nesting blocks inside other -// blocks. -type NestingMode int - -//go:generate go run golang.org/x/tools/cmd/stringer -type=NestingMode - -const ( - nestingModeInvalid NestingMode = iota - - // NestingSingle indicates that only a single instance of a given - // block type is permitted, with no labels, and its content should be - // provided directly as an object value. - NestingSingle - - // NestingGroup is similar to NestingSingle in that it calls for only a - // single instance of a given block type with no labels, but it additionally - // guarantees that its result will never be null, even if the block is - // absent, and instead the nested attributes and blocks will be treated - // as absent in that case. (Any required attributes or blocks within the - // nested block are not enforced unless the block is explicitly present - // in the configuration, so they are all effectively optional when the - // block is not present.) - // - // This is useful for the situation where a remote API has a feature that - // is always enabled but has a group of settings related to that feature - // that themselves have default values. By using NestingGroup instead of - // NestingSingle in that case, generated plans will show the block as - // present even when not present in configuration, thus allowing any - // default values within to be displayed to the user. - NestingGroup - - // NestingList indicates that multiple blocks of the given type are - // permitted, with no labels, and that their corresponding objects should - // be provided in a list. - NestingList - - // NestingSet indicates that multiple blocks of the given type are - // permitted, with no labels, and that their corresponding objects should - // be provided in a set. - NestingSet - - // NestingMap indicates that multiple blocks of the given type are - // permitted, each with a single label, and that their corresponding - // objects should be provided in a map whose keys are the labels. - // - // It's an error, therefore, to use the same label value on multiple - // blocks. 
- NestingMap -) diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go b/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go deleted file mode 100644 index 9fc2de38..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go +++ /dev/null @@ -1,173 +0,0 @@ -package configschema - -import ( - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/helper/didyoumean" - "github.com/hashicorp/terraform/tfdiags" -) - -// StaticValidateTraversal checks whether the given traversal (which must be -// relative) refers to a construct in the receiving schema, returning error -// diagnostics if any problems are found. -// -// This method is "optimistic" in that it will not return errors for possible -// problems that cannot be detected statically. It is possible that an -// traversal which passed static validation will still fail when evaluated. -func (b *Block) StaticValidateTraversal(traversal hcl.Traversal) tfdiags.Diagnostics { - if !traversal.IsRelative() { - panic("StaticValidateTraversal on absolute traversal") - } - if len(traversal) == 0 { - return nil - } - - var diags tfdiags.Diagnostics - - next := traversal[0] - after := traversal[1:] - - var name string - switch step := next.(type) { - case hcl.TraverseAttr: - name = step.Name - case hcl.TraverseIndex: - // No other traversal step types are allowed directly at a block. - // If it looks like the user was trying to use index syntax to - // access an attribute then we'll produce a specialized message. - key := step.Key - if key.Type() == cty.String && key.IsKnown() && !key.IsNull() { - maybeName := key.AsString() - if hclsyntax.ValidIdentifier(maybeName) { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid index operation`, - Detail: fmt.Sprintf(`Only attribute access is allowed here. Did you mean to access attribute %q using the dot operator?`, maybeName), - Subject: &step.SrcRange, - }) - return diags - } - } - // If it looks like some other kind of index then we'll use a generic error. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid index operation`, - Detail: `Only attribute access is allowed here, using the dot operator.`, - Subject: &step.SrcRange, - }) - return diags - default: - // No other traversal types should appear in a normal valid traversal, - // but we'll handle this with a generic error anyway to be robust. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid operation`, - Detail: `Only attribute access is allowed here, using the dot operator.`, - Subject: next.SourceRange().Ptr(), - }) - return diags - } - - if attrS, exists := b.Attributes[name]; exists { - // For attribute validation we will just apply the rest of the - // traversal to an unknown value of the attribute type and pass - // through HCL's own errors, since we don't want to replicate all of - // HCL's type checking rules here. - val := cty.UnknownVal(attrS.Type) - _, hclDiags := after.TraverseRel(val) - diags = diags.Append(hclDiags) - return diags - } - - if blockS, exists := b.BlockTypes[name]; exists { - moreDiags := blockS.staticValidateTraversal(name, after) - diags = diags.Append(moreDiags) - return diags - } - - // If we get here then the name isn't valid at all. 
We'll collect up - // all of the names that _are_ valid to use as suggestions. - var suggestions []string - for name := range b.Attributes { - suggestions = append(suggestions, name) - } - for name := range b.BlockTypes { - suggestions = append(suggestions, name) - } - sort.Strings(suggestions) - suggestion := didyoumean.NameSuggestion(name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Unsupported attribute`, - Detail: fmt.Sprintf(`This object has no argument, nested block, or exported attribute named %q.%s`, name, suggestion), - Subject: next.SourceRange().Ptr(), - }) - - return diags -} - -func (b *NestedBlock) staticValidateTraversal(typeName string, traversal hcl.Traversal) tfdiags.Diagnostics { - if b.Nesting == NestingSingle || b.Nesting == NestingGroup { - // Single blocks are easy: just pass right through. - return b.Block.StaticValidateTraversal(traversal) - } - - if len(traversal) == 0 { - // It's always valid to access a nested block's attribute directly. - return nil - } - - var diags tfdiags.Diagnostics - next := traversal[0] - after := traversal[1:] - - switch b.Nesting { - - case NestingSet: - // Can't traverse into a set at all, since it does not have any keys - // to index with. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Cannot index a set value`, - Detail: fmt.Sprintf(`Block type %q is represented by a set of objects, and set elements do not have addressable keys. To find elements matching specific criteria, use a "for" expression with an "if" clause.`, typeName), - Subject: next.SourceRange().Ptr(), - }) - return diags - - case NestingList: - if _, ok := next.(hcl.TraverseIndex); ok { - moreDiags := b.Block.StaticValidateTraversal(after) - diags = diags.Append(moreDiags) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid operation`, - Detail: fmt.Sprintf(`Block type %q is represented by a list of objects, so it must be indexed using a numeric key, like .%s[0].`, typeName, typeName), - Subject: next.SourceRange().Ptr(), - }) - } - return diags - - case NestingMap: - // Both attribute and index steps are valid for maps, so we'll just - // pass through here and let normal evaluation catch an - // incorrectly-typed index key later, if present. - moreDiags := b.Block.StaticValidateTraversal(after) - diags = diags.Append(moreDiags) - return diags - - default: - // Invalid nesting type is just ignored. It's checked by - // InternalValidate. (Note that we handled NestingSingle separately - // back at the start of this function.) - return nil - } -} diff --git a/vendor/github.com/hashicorp/terraform/configs/depends_on.go b/vendor/github.com/hashicorp/terraform/configs/depends_on.go deleted file mode 100644 index 036c2d6c..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/depends_on.go +++ /dev/null @@ -1,23 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" -) - -func decodeDependsOn(attr *hcl.Attribute) ([]hcl.Traversal, hcl.Diagnostics) { - var ret []hcl.Traversal - exprs, diags := hcl.ExprList(attr.Expr) - - for _, expr := range exprs { - expr, shimDiags := shimTraversalInString(expr, false) - diags = append(diags, shimDiags...) - - traversal, travDiags := hcl.AbsTraversalForExpr(expr) - diags = append(diags, travDiags...) 
- if len(traversal) != 0 { - ret = append(ret, traversal) - } - } - - return ret, diags -} diff --git a/vendor/github.com/hashicorp/terraform/configs/doc.go b/vendor/github.com/hashicorp/terraform/configs/doc.go deleted file mode 100644 index f01eb79f..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Package configs contains types that represent Terraform configurations and -// the different elements thereof. -// -// The functionality in this package can be used for some static analyses of -// Terraform configurations, but this package generally exposes representations -// of the configuration source code rather than the result of evaluating these -// objects. The sibling package "lang" deals with evaluation of structures -// and expressions in the configuration. -// -// Due to its close relationship with HCL, this package makes frequent use -// of types from the HCL API, including raw HCL diagnostic messages. Such -// diagnostics can be converted into Terraform-flavored diagnostics, if needed, -// using functions in the sibling package tfdiags. -// -// The Parser type is the main entry-point into this package. The LoadConfigDir -// method can be used to load a single module directory, and then a full -// configuration (including any descendent modules) can be produced using -// the top-level BuildConfig method. -package configs diff --git a/vendor/github.com/hashicorp/terraform/configs/experiments.go b/vendor/github.com/hashicorp/terraform/configs/experiments.go deleted file mode 100644 index 8af1e951..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/experiments.go +++ /dev/null @@ -1,143 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/experiments" -) - -// sniffActiveExperiments does minimal parsing of the given body for -// "terraform" blocks with "experiments" attributes, returning the -// experiments found. -// -// This is separate from other processing so that we can be sure that all of -// the experiments are known before we process the result of the module config, -// and thus we can take into account which experiments are active when deciding -// how to decode. -func sniffActiveExperiments(body hcl.Body) (experiments.Set, hcl.Diagnostics) { - rootContent, _, diags := body.PartialContent(configFileTerraformBlockSniffRootSchema) - - ret := experiments.NewSet() - - for _, block := range rootContent.Blocks { - content, _, blockDiags := block.Body.PartialContent(configFileExperimentsSniffBlockSchema) - diags = append(diags, blockDiags...) - - attr, exists := content.Attributes["experiments"] - if !exists { - continue - } - - exps, expDiags := decodeExperimentsAttr(attr) - diags = append(diags, expDiags...) - if !expDiags.HasErrors() { - ret = experiments.SetUnion(ret, exps) - } - } - - return ret, diags -} - -func decodeExperimentsAttr(attr *hcl.Attribute) (experiments.Set, hcl.Diagnostics) { - var diags hcl.Diagnostics - - exprs, moreDiags := hcl.ExprList(attr.Expr) - diags = append(diags, moreDiags...) 
- if moreDiags.HasErrors() { - return nil, diags - } - - var ret = experiments.NewSet() - for _, expr := range exprs { - kw := hcl.ExprAsKeyword(expr) - if kw == "" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid experiment keyword", - Detail: "Elements of \"experiments\" must all be keywords representing active experiments.", - Subject: expr.Range().Ptr(), - }) - continue - } - - exp, err := experiments.GetCurrent(kw) - switch err := err.(type) { - case experiments.UnavailableError: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unknown experiment keyword", - Detail: fmt.Sprintf("There is no current experiment with the keyword %q.", kw), - Subject: expr.Range().Ptr(), - }) - case experiments.ConcludedError: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Experiment has concluded", - Detail: fmt.Sprintf("Experiment %q is no longer available. %s", kw, err.Message), - Subject: expr.Range().Ptr(), - }) - case nil: - // No error at all means it's valid and current. - ret.Add(exp) - - // However, experimental features are subject to breaking changes - // in future releases, so we'll warn about them to help make sure - // folks aren't inadvertently using them in places where that'd be - // inappropriate, particularly if the experiment is active in a - // shared module they depend on. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: fmt.Sprintf("Experimental feature %q is active", exp.Keyword()), - Detail: "Experimental features are subject to breaking changes in future minor or patch releases, based on feedback.\n\nIf you have feedback on the design of this feature, please open a GitHub issue to discuss it.", - Subject: expr.Range().Ptr(), - }) - - default: - // This should never happen, because GetCurrent is not documented - // to return any other error type, but we'll handle it to be robust. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid experiment keyword", - Detail: fmt.Sprintf("Could not parse %q as an experiment keyword: %s.", kw, err.Error()), - Subject: expr.Range().Ptr(), - }) - } - } - return ret, diags -} - -func checkModuleExperiments(m *Module) hcl.Diagnostics { - var diags hcl.Diagnostics - - // When we have current experiments, this is a good place to check that - // the features in question can only be used when the experiments are - // active. Return error diagnostics if a feature is being used without - // opting in to the feature. 
For example: - /* - if !m.ActiveExperiments.Has(experiments.ResourceForEach) { - for _, rc := range m.ManagedResources { - if rc.ForEach != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Resource for_each is experimental", - Detail: "This feature is currently an opt-in experiment, subject to change in future releases based on feedback.\n\nActivate the feature for this module by adding resource_for_each to the list of active experiments.", - Subject: rc.ForEach.Range().Ptr(), - }) - } - } - for _, rc := range m.DataResources { - if rc.ForEach != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Resource for_each is experimental", - Detail: "This feature is currently an opt-in experiment, subject to change in future releases based on feedback.\n\nActivate the feature for this module by adding resource_for_each to the list of active experiments.", - Subject: rc.ForEach.Range().Ptr(), - }) - } - } - } - */ - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/flatmap.go b/vendor/github.com/hashicorp/terraform/configs/hcl2shim/flatmap.go deleted file mode 100644 index bb4228d9..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/flatmap.go +++ /dev/null @@ -1,424 +0,0 @@ -package hcl2shim - -import ( - "fmt" - "strconv" - "strings" - - "github.com/zclconf/go-cty/cty/convert" - - "github.com/zclconf/go-cty/cty" -) - -// FlatmapValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic -// types library that HCL2 uses) to a map compatible with what would be -// produced by the "flatmap" package. -// -// The type of the given value informs the structure of the resulting map. -// The value must be of an object type or this function will panic. -// -// Flatmap values can only represent maps when they are of primitive types, -// so the given value must not have any maps of complex types or the result -// is undefined. -func FlatmapValueFromHCL2(v cty.Value) map[string]string { - if v.IsNull() { - return nil - } - - if !v.Type().IsObjectType() { - panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", v.Type())) - } - - m := make(map[string]string) - flatmapValueFromHCL2Map(m, "", v) - return m -} - -func flatmapValueFromHCL2Value(m map[string]string, key string, val cty.Value) { - ty := val.Type() - switch { - case ty.IsPrimitiveType() || ty == cty.DynamicPseudoType: - flatmapValueFromHCL2Primitive(m, key, val) - case ty.IsObjectType() || ty.IsMapType(): - flatmapValueFromHCL2Map(m, key+".", val) - case ty.IsTupleType() || ty.IsListType() || ty.IsSetType(): - flatmapValueFromHCL2Seq(m, key+".", val) - default: - panic(fmt.Sprintf("cannot encode %s to flatmap", ty.FriendlyName())) - } -} - -func flatmapValueFromHCL2Primitive(m map[string]string, key string, val cty.Value) { - if !val.IsKnown() { - m[key] = UnknownVariableValue - return - } - if val.IsNull() { - // Omit entirely - return - } - - var err error - val, err = convert.Convert(val, cty.String) - if err != nil { - // Should not be possible, since all primitive types can convert to string. 
- panic(fmt.Sprintf("invalid primitive encoding to flatmap: %s", err)) - } - m[key] = val.AsString() -} - -func flatmapValueFromHCL2Map(m map[string]string, prefix string, val cty.Value) { - if val.IsNull() { - // Omit entirely - return - } - if !val.IsKnown() { - switch { - case val.Type().IsObjectType(): - // Whole objects can't be unknown in flatmap, so instead we'll - // just write all of the attribute values out as unknown. - for name, aty := range val.Type().AttributeTypes() { - flatmapValueFromHCL2Value(m, prefix+name, cty.UnknownVal(aty)) - } - default: - m[prefix+"%"] = UnknownVariableValue - } - return - } - - len := 0 - for it := val.ElementIterator(); it.Next(); { - ak, av := it.Element() - name := ak.AsString() - flatmapValueFromHCL2Value(m, prefix+name, av) - len++ - } - if !val.Type().IsObjectType() { // objects don't have an explicit count included, since their attribute count is fixed - m[prefix+"%"] = strconv.Itoa(len) - } -} - -func flatmapValueFromHCL2Seq(m map[string]string, prefix string, val cty.Value) { - if val.IsNull() { - // Omit entirely - return - } - if !val.IsKnown() { - m[prefix+"#"] = UnknownVariableValue - return - } - - // For sets this won't actually generate exactly what helper/schema would've - // generated, because we don't have access to the set key function it - // would've used. However, in practice it doesn't actually matter what the - // keys are as long as they are unique, so we'll just generate sequential - // indexes for them as if it were a list. - // - // An important implication of this, however, is that the set ordering will - // not be consistent across mutations and so different keys may be assigned - // to the same value when round-tripping. Since this shim is intended to - // be short-lived and not used for round-tripping, we accept this. - i := 0 - for it := val.ElementIterator(); it.Next(); { - _, av := it.Element() - key := prefix + strconv.Itoa(i) - flatmapValueFromHCL2Value(m, key, av) - i++ - } - m[prefix+"#"] = strconv.Itoa(i) -} - -// HCL2ValueFromFlatmap converts a map compatible with what would be produced -// by the "flatmap" package to a HCL2 (really, the cty dynamic types library -// that HCL2 uses) object type. -// -// The intended result type must be provided in order to guide how the -// map contents are decoded. This must be an object type or this function -// will panic. -// -// Flatmap values can only represent maps when they are of primitive types, -// so the given type must not have any maps of complex types or the result -// is undefined. -// -// The result may contain null values if the given map does not contain keys -// for all of the different key paths implied by the given type. 
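Both directions of the shim rely on the same flatmap conventions: nested keys are dot-joined, maps carry a "%" count entry, and lists and sets carry a "#" count entry. A small plain-Go illustration of the encoding that FlatmapValueFromHCL2 above would produce (the object in the comment is hypothetical):

```go
package main

import "fmt"

func main() {
	// Flatmap form of the (hypothetical) object
	//   {name = "web", tags = {env = "prod"}, ports = [80, 443]}
	// following the conventions implemented above: maps get a "%" count
	// entry, lists/sets get a "#" count entry, and nesting is dot-joined.
	flat := map[string]string{
		"name":     "web",
		"tags.%":   "1",
		"tags.env": "prod",
		"ports.#":  "2",
		"ports.0":  "80",
		"ports.1":  "443",
	}
	for k, v := range flat {
		fmt.Printf("%s = %s\n", k, v)
	}
}
```

Given the matching object type, HCL2ValueFromFlatmap below reverses this encoding, using the count entries to distinguish null (entry absent) from empty (entry "0").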
-func HCL2ValueFromFlatmap(m map[string]string, ty cty.Type) (cty.Value, error) { - if m == nil { - return cty.NullVal(ty), nil - } - if !ty.IsObjectType() { - panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", ty)) - } - - return hcl2ValueFromFlatmapObject(m, "", ty.AttributeTypes()) -} - -func hcl2ValueFromFlatmapValue(m map[string]string, key string, ty cty.Type) (cty.Value, error) { - var val cty.Value - var err error - switch { - case ty.IsPrimitiveType(): - val, err = hcl2ValueFromFlatmapPrimitive(m, key, ty) - case ty.IsObjectType(): - val, err = hcl2ValueFromFlatmapObject(m, key+".", ty.AttributeTypes()) - case ty.IsTupleType(): - val, err = hcl2ValueFromFlatmapTuple(m, key+".", ty.TupleElementTypes()) - case ty.IsMapType(): - val, err = hcl2ValueFromFlatmapMap(m, key+".", ty) - case ty.IsListType(): - val, err = hcl2ValueFromFlatmapList(m, key+".", ty) - case ty.IsSetType(): - val, err = hcl2ValueFromFlatmapSet(m, key+".", ty) - default: - err = fmt.Errorf("cannot decode %s from flatmap", ty.FriendlyName()) - } - - if err != nil { - return cty.DynamicVal, err - } - return val, nil -} - -func hcl2ValueFromFlatmapPrimitive(m map[string]string, key string, ty cty.Type) (cty.Value, error) { - rawVal, exists := m[key] - if !exists { - return cty.NullVal(ty), nil - } - if rawVal == UnknownVariableValue { - return cty.UnknownVal(ty), nil - } - - var err error - val := cty.StringVal(rawVal) - val, err = convert.Convert(val, ty) - if err != nil { - // This should never happen for _valid_ input, but flatmap data might - // be tampered with by the user and become invalid. - return cty.DynamicVal, fmt.Errorf("invalid value for %q in state: %s", key, err) - } - - return val, nil -} - -func hcl2ValueFromFlatmapObject(m map[string]string, prefix string, atys map[string]cty.Type) (cty.Value, error) { - vals := make(map[string]cty.Value) - for name, aty := range atys { - val, err := hcl2ValueFromFlatmapValue(m, prefix+name, aty) - if err != nil { - return cty.DynamicVal, err - } - vals[name] = val - } - return cty.ObjectVal(vals), nil -} - -func hcl2ValueFromFlatmapTuple(m map[string]string, prefix string, etys []cty.Type) (cty.Value, error) { - var vals []cty.Value - - // if the container is unknown, there is no count string - listName := strings.TrimRight(prefix, ".") - if m[listName] == UnknownVariableValue { - return cty.UnknownVal(cty.Tuple(etys)), nil - } - - countStr, exists := m[prefix+"#"] - if !exists { - return cty.NullVal(cty.Tuple(etys)), nil - } - if countStr == UnknownVariableValue { - return cty.UnknownVal(cty.Tuple(etys)), nil - } - - count, err := strconv.Atoi(countStr) - if err != nil { - return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err) - } - if count != len(etys) { - return cty.DynamicVal, fmt.Errorf("wrong number of values for %q in state: got %d, but need %d", prefix, count, len(etys)) - } - - vals = make([]cty.Value, len(etys)) - for i, ety := range etys { - key := prefix + strconv.Itoa(i) - val, err := hcl2ValueFromFlatmapValue(m, key, ety) - if err != nil { - return cty.DynamicVal, err - } - vals[i] = val - } - return cty.TupleVal(vals), nil -} - -func hcl2ValueFromFlatmapMap(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { - vals := make(map[string]cty.Value) - ety := ty.ElementType() - - // if the container is unknown, there is no count string - listName := strings.TrimRight(prefix, ".") - if m[listName] == UnknownVariableValue { - return cty.UnknownVal(ty), nil - } - - // We actually don't really care 
about the "count" of a map for our - // purposes here, but we do need to check if it _exists_ in order to - // recognize the difference between null (not set at all) and empty. - if strCount, exists := m[prefix+"%"]; !exists { - return cty.NullVal(ty), nil - } else if strCount == UnknownVariableValue { - return cty.UnknownVal(ty), nil - } - - for fullKey := range m { - if !strings.HasPrefix(fullKey, prefix) { - continue - } - - // The flatmap format doesn't allow us to distinguish between keys - // that contain periods and nested objects, so by convention a - // map is only ever of primitive type in flatmap, and we just assume - // that the remainder of the raw key (dots and all) is the key we - // want in the result value. - key := fullKey[len(prefix):] - if key == "%" { - // Ignore the "count" key - continue - } - - val, err := hcl2ValueFromFlatmapValue(m, fullKey, ety) - if err != nil { - return cty.DynamicVal, err - } - vals[key] = val - } - - if len(vals) == 0 { - return cty.MapValEmpty(ety), nil - } - return cty.MapVal(vals), nil -} - -func hcl2ValueFromFlatmapList(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { - var vals []cty.Value - - // if the container is unknown, there is no count string - listName := strings.TrimRight(prefix, ".") - if m[listName] == UnknownVariableValue { - return cty.UnknownVal(ty), nil - } - - countStr, exists := m[prefix+"#"] - if !exists { - return cty.NullVal(ty), nil - } - if countStr == UnknownVariableValue { - return cty.UnknownVal(ty), nil - } - - count, err := strconv.Atoi(countStr) - if err != nil { - return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err) - } - - ety := ty.ElementType() - if count == 0 { - return cty.ListValEmpty(ety), nil - } - - vals = make([]cty.Value, count) - for i := 0; i < count; i++ { - key := prefix + strconv.Itoa(i) - val, err := hcl2ValueFromFlatmapValue(m, key, ety) - if err != nil { - return cty.DynamicVal, err - } - vals[i] = val - } - - return cty.ListVal(vals), nil -} - -func hcl2ValueFromFlatmapSet(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { - var vals []cty.Value - ety := ty.ElementType() - - // if the container is unknown, there is no count string - listName := strings.TrimRight(prefix, ".") - if m[listName] == UnknownVariableValue { - return cty.UnknownVal(ty), nil - } - - strCount, exists := m[prefix+"#"] - if !exists { - return cty.NullVal(ty), nil - } else if strCount == UnknownVariableValue { - return cty.UnknownVal(ty), nil - } - - // Keep track of keys we've seen, se we don't add the same set value - // multiple times. The cty.Set will normally de-duplicate values, but we may - // have unknown values that would not show as equivalent. - seen := map[string]bool{} - - for fullKey := range m { - if !strings.HasPrefix(fullKey, prefix) { - continue - } - subKey := fullKey[len(prefix):] - if subKey == "#" { - // Ignore the "count" key - continue - } - key := fullKey - if dot := strings.IndexByte(subKey, '.'); dot != -1 { - key = fullKey[:dot+len(prefix)] - } - - if seen[key] { - continue - } - - seen[key] = true - - // The flatmap format doesn't allow us to distinguish between keys - // that contain periods and nested objects, so by convention a - // map is only ever of primitive type in flatmap, and we just assume - // that the remainder of the raw key (dots and all) is the key we - // want in the result value. 
- - val, err := hcl2ValueFromFlatmapValue(m, key, ety) - if err != nil { - return cty.DynamicVal, err - } - vals = append(vals, val) - } - - if len(vals) == 0 && strCount == "1" { - // An empty set wouldn't be represented in the flatmap, so this must be - // a single empty object since the count is actually 1. - // Add an appropriately typed null value to the set. - var val cty.Value - switch { - case ety.IsMapType(): - val = cty.MapValEmpty(ety) - case ety.IsListType(): - val = cty.ListValEmpty(ety) - case ety.IsSetType(): - val = cty.SetValEmpty(ety) - case ety.IsObjectType(): - // TODO: cty.ObjectValEmpty - objectMap := map[string]cty.Value{} - for attr, ty := range ety.AttributeTypes() { - objectMap[attr] = cty.NullVal(ty) - } - val = cty.ObjectVal(objectMap) - default: - val = cty.NullVal(ety) - } - vals = append(vals, val) - - } else if len(vals) == 0 { - return cty.SetValEmpty(ety), nil - } - - return cty.SetVal(vals), nil -} diff --git a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/paths.go b/vendor/github.com/hashicorp/terraform/configs/hcl2shim/paths.go deleted file mode 100644 index 3403c026..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/paths.go +++ /dev/null @@ -1,276 +0,0 @@ -package hcl2shim - -import ( - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/zclconf/go-cty/cty" -) - -// RequiresReplace takes a list of flatmapped paths from a -// InstanceDiff.Attributes along with the corresponding cty.Type, and returns -// the list of the cty.Paths that are flagged as causing the resource -// replacement (RequiresNew). -// This will filter out redundant paths, paths that refer to flatmapped indexes -// (e.g. "#", "%"), and will return any changes within a set as the path to the -// set itself. -func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) { - var paths []cty.Path - - for _, attr := range attrs { - p, err := requiresReplacePath(attr, ty) - if err != nil { - return nil, err - } - - paths = append(paths, p) - } - - // now trim off any trailing paths that aren't GetAttrSteps, since only an - // attribute itself can require replacement - paths = trimPaths(paths) - - // There may be redundant paths due to set elements or index attributes - // Do some ugly n^2 filtering, but these are always fairly small sets. - for i := 0; i < len(paths)-1; i++ { - for j := i + 1; j < len(paths); j++ { - if reflect.DeepEqual(paths[i], paths[j]) { - // swap the tail and slice it off - paths[j], paths[len(paths)-1] = paths[len(paths)-1], paths[j] - paths = paths[:len(paths)-1] - j-- - } - } - } - - return paths, nil -} - -// trimPaths removes any trailing steps that aren't of type GetAttrSet, since -// only an attribute itself can require replacement -func trimPaths(paths []cty.Path) []cty.Path { - var trimmed []cty.Path - for _, path := range paths { - path = trimPath(path) - if len(path) > 0 { - trimmed = append(trimmed, path) - } - } - return trimmed -} - -func trimPath(path cty.Path) cty.Path { - for len(path) > 0 { - _, isGetAttr := path[len(path)-1].(cty.GetAttrStep) - if isGetAttr { - break - } - path = path[:len(path)-1] - } - return path -} - -// requiresReplacePath takes a key from a flatmap along with the cty.Type -// describing the structure, and returns the cty.Path that would be used to -// reference the nested value in the data structure. -// This is used specifically to record the RequiresReplace attributes from a -// ResourceInstanceDiff. 
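The trimPath helper above is what reduces a flatmap-derived path such as "ports.0" to the attribute that actually requires replacement. A minimal sketch of the same trimming logic against go-cty paths (the "ports" attribute is invented):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// trimToAttr drops trailing steps that are not attribute lookups, mirroring
// the trimPath logic above: only an attribute itself can require
// replacement, so an index step like [0] is stripped from the tail.
func trimToAttr(path cty.Path) cty.Path {
	for len(path) > 0 {
		if _, ok := path[len(path)-1].(cty.GetAttrStep); ok {
			break
		}
		path = path[:len(path)-1]
	}
	return path
}

func main() {
	// Path equivalent of the flatmap key "ports.0".
	p := cty.Path{
		cty.GetAttrStep{Name: "ports"},
		cty.IndexStep{Key: cty.NumberIntVal(0)},
	}
	fmt.Println(len(p), "->", len(trimToAttr(p))) // 2 -> 1
}
```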
-func requiresReplacePath(k string, ty cty.Type) (cty.Path, error) { - if k == "" { - return nil, nil - } - if !ty.IsObjectType() { - panic(fmt.Sprintf("requires replace path on non-object type: %#v", ty)) - } - - path, err := pathFromFlatmapKeyObject(k, ty.AttributeTypes()) - if err != nil { - return path, fmt.Errorf("[%s] %s", k, err) - } - return path, nil -} - -func pathSplit(p string) (string, string) { - parts := strings.SplitN(p, ".", 2) - head := parts[0] - rest := "" - if len(parts) > 1 { - rest = parts[1] - } - return head, rest -} - -func pathFromFlatmapKeyObject(key string, atys map[string]cty.Type) (cty.Path, error) { - k, rest := pathSplit(key) - - path := cty.Path{cty.GetAttrStep{Name: k}} - - ty, ok := atys[k] - if !ok { - return path, fmt.Errorf("attribute %q not found", k) - } - - if rest == "" { - return path, nil - } - - p, err := pathFromFlatmapKeyValue(rest, ty) - if err != nil { - return path, err - } - - return append(path, p...), nil -} - -func pathFromFlatmapKeyValue(key string, ty cty.Type) (cty.Path, error) { - var path cty.Path - var err error - - switch { - case ty.IsPrimitiveType(): - err = fmt.Errorf("invalid step %q with type %#v", key, ty) - case ty.IsObjectType(): - path, err = pathFromFlatmapKeyObject(key, ty.AttributeTypes()) - case ty.IsTupleType(): - path, err = pathFromFlatmapKeyTuple(key, ty.TupleElementTypes()) - case ty.IsMapType(): - path, err = pathFromFlatmapKeyMap(key, ty) - case ty.IsListType(): - path, err = pathFromFlatmapKeyList(key, ty) - case ty.IsSetType(): - path, err = pathFromFlatmapKeySet(key, ty) - default: - err = fmt.Errorf("unrecognized type: %s", ty.FriendlyName()) - } - - if err != nil { - return path, err - } - - return path, nil -} - -func pathFromFlatmapKeyTuple(key string, etys []cty.Type) (cty.Path, error) { - var path cty.Path - var err error - - k, rest := pathSplit(key) - - // we don't need to convert the index keys to paths - if k == "#" { - return path, nil - } - - idx, err := strconv.Atoi(k) - if err != nil { - return path, err - } - - path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} - - if idx >= len(etys) { - return path, fmt.Errorf("index %s out of range in %#v", key, etys) - } - - if rest == "" { - return path, nil - } - - ty := etys[idx] - - p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) - if err != nil { - return path, err - } - - return append(path, p...), nil -} - -func pathFromFlatmapKeyMap(key string, ty cty.Type) (cty.Path, error) { - var path cty.Path - var err error - - k, rest := key, "" - if !ty.ElementType().IsPrimitiveType() { - k, rest = pathSplit(key) - } - - // we don't need to convert the index keys to paths - if k == "%" { - return path, nil - } - - path = cty.Path{cty.IndexStep{Key: cty.StringVal(k)}} - - if rest == "" { - return path, nil - } - - p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) - if err != nil { - return path, err - } - - return append(path, p...), nil -} - -func pathFromFlatmapKeyList(key string, ty cty.Type) (cty.Path, error) { - var path cty.Path - var err error - - k, rest := pathSplit(key) - - // we don't need to convert the index keys to paths - if key == "#" { - return path, nil - } - - idx, err := strconv.Atoi(k) - if err != nil { - return path, err - } - - path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} - - if rest == "" { - return path, nil - } - - p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) - if err != nil { - return path, err - } - - return append(path, p...), nil -} - -func 
pathFromFlatmapKeySet(key string, ty cty.Type) (cty.Path, error) { - // once we hit a set, we can't return consistent paths, so just mark the - // set as a whole changed. - return nil, nil -} - -// FlatmapKeyFromPath returns the flatmap equivalent of the given cty.Path for -// use in generating legacy style diffs. -func FlatmapKeyFromPath(path cty.Path) string { - var parts []string - - for _, step := range path { - switch step := step.(type) { - case cty.GetAttrStep: - parts = append(parts, step.Name) - case cty.IndexStep: - switch ty := step.Key.Type(); { - case ty == cty.String: - parts = append(parts, step.Key.AsString()) - case ty == cty.Number: - i, _ := step.Key.AsBigFloat().Int64() - parts = append(parts, strconv.Itoa(int(i))) - } - } - } - - return strings.Join(parts, ".") -} diff --git a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/single_attr_body.go b/vendor/github.com/hashicorp/terraform/configs/hcl2shim/single_attr_body.go deleted file mode 100644 index 68f48da8..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/single_attr_body.go +++ /dev/null @@ -1,85 +0,0 @@ -package hcl2shim - -import ( - "fmt" - - hcl2 "github.com/hashicorp/hcl/v2" -) - -// SingleAttrBody is a weird implementation of hcl2.Body that acts as if -// it has a single attribute whose value is the given expression. -// -// This is used to shim Resource.RawCount and Output.RawConfig to behave -// more like they do in the old HCL loader. -type SingleAttrBody struct { - Name string - Expr hcl2.Expression -} - -var _ hcl2.Body = SingleAttrBody{} - -func (b SingleAttrBody) Content(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Diagnostics) { - content, all, diags := b.content(schema) - if !all { - // This should never happen because this body implementation should only - // be used by code that is aware that it's using a single-attr body. - diags = append(diags, &hcl2.Diagnostic{ - Severity: hcl2.DiagError, - Summary: "Invalid attribute", - Detail: fmt.Sprintf("The correct attribute name is %q.", b.Name), - Subject: b.Expr.Range().Ptr(), - }) - } - return content, diags -} - -func (b SingleAttrBody) PartialContent(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Body, hcl2.Diagnostics) { - content, all, diags := b.content(schema) - var remain hcl2.Body - if all { - // If the request matched the one attribute we represent, then the - // remaining body is empty. 
- remain = hcl2.EmptyBody() - } else { - remain = b - } - return content, remain, diags -} - -func (b SingleAttrBody) content(schema *hcl2.BodySchema) (*hcl2.BodyContent, bool, hcl2.Diagnostics) { - ret := &hcl2.BodyContent{} - all := false - var diags hcl2.Diagnostics - - for _, attrS := range schema.Attributes { - if attrS.Name == b.Name { - attrs, _ := b.JustAttributes() - ret.Attributes = attrs - all = true - } else if attrS.Required { - diags = append(diags, &hcl2.Diagnostic{ - Severity: hcl2.DiagError, - Summary: "Missing attribute", - Detail: fmt.Sprintf("The attribute %q is required.", attrS.Name), - Subject: b.Expr.Range().Ptr(), - }) - } - } - - return ret, all, diags -} - -func (b SingleAttrBody) JustAttributes() (hcl2.Attributes, hcl2.Diagnostics) { - return hcl2.Attributes{ - b.Name: { - Expr: b.Expr, - Name: b.Name, - NameRange: b.Expr.Range(), - Range: b.Expr.Range(), - }, - }, nil -} - -func (b SingleAttrBody) MissingItemRange() hcl2.Range { - return b.Expr.Range() -} diff --git a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/values.go b/vendor/github.com/hashicorp/terraform/configs/hcl2shim/values.go deleted file mode 100644 index daeb0b8e..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/values.go +++ /dev/null @@ -1,353 +0,0 @@ -package hcl2shim - -import ( - "fmt" - "math/big" - - "github.com/hashicorp/hil/ast" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/configs/configschema" -) - -// UnknownVariableValue is a sentinel value that can be used -// to denote that the value of a variable is unknown at this time. -// RawConfig uses this information to build up data about -// unknown keys. -const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" - -// ConfigValueFromHCL2Block is like ConfigValueFromHCL2 but it works only for -// known object values and uses the provided block schema to perform some -// additional normalization to better mimic the shape of value that the old -// HCL1/HIL-based codepaths would've produced. -// -// In particular, it discards the collections that we use to represent nested -// blocks (other than NestingSingle) if they are empty, which better mimics -// the HCL1 behavior because HCL1 had no knowledge of the schema and so didn't -// know that an unspecified block _could_ exist. -// -// The given object value must conform to the schema's implied type or this -// function will panic or produce incorrect results. -// -// This is primarily useful for the final transition from new-style values to -// terraform.ResourceConfig before calling to a legacy provider, since -// helper/schema (the old provider SDK) is particularly sensitive to these -// subtle differences within its validation code. 
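The SingleAttrBody shim above hand-builds hcl2 attributes around a single expression. A compact sketch of the same idea using hcl.StaticExpr, which yields an expression without parsing any source (the "count" attribute name is illustrative, and this mimics the shim rather than calling it):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// One attribute name paired with one pre-built expression, just as
	// SingleAttrBody.JustAttributes constructs above.
	expr := hcl.StaticExpr(cty.NumberIntVal(3), hcl.Range{})

	attrs := hcl.Attributes{
		"count": {Name: "count", Expr: expr, Range: expr.Range(), NameRange: expr.Range()},
	}

	// Evaluating the expression needs no EvalContext because it is static.
	val, diags := attrs["count"].Expr.Value(nil)
	fmt.Println(val.AsBigFloat(), diags.HasErrors()) // 3 false
}
```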
-func ConfigValueFromHCL2Block(v cty.Value, schema *configschema.Block) map[string]interface{} { - if v.IsNull() { - return nil - } - if !v.IsKnown() { - panic("ConfigValueFromHCL2Block used with unknown value") - } - if !v.Type().IsObjectType() { - panic(fmt.Sprintf("ConfigValueFromHCL2Block used with non-object value %#v", v)) - } - - atys := v.Type().AttributeTypes() - ret := make(map[string]interface{}) - - for name := range schema.Attributes { - if _, exists := atys[name]; !exists { - continue - } - - av := v.GetAttr(name) - if av.IsNull() { - // Skip nulls altogether, to better mimic how HCL1 would behave - continue - } - ret[name] = ConfigValueFromHCL2(av) - } - - for name, blockS := range schema.BlockTypes { - if _, exists := atys[name]; !exists { - continue - } - bv := v.GetAttr(name) - if !bv.IsKnown() { - ret[name] = UnknownVariableValue - continue - } - if bv.IsNull() { - continue - } - - switch blockS.Nesting { - - case configschema.NestingSingle, configschema.NestingGroup: - ret[name] = ConfigValueFromHCL2Block(bv, &blockS.Block) - - case configschema.NestingList, configschema.NestingSet: - l := bv.LengthInt() - if l == 0 { - // skip empty collections to better mimic how HCL1 would behave - continue - } - - elems := make([]interface{}, 0, l) - for it := bv.ElementIterator(); it.Next(); { - _, ev := it.Element() - if !ev.IsKnown() { - elems = append(elems, UnknownVariableValue) - continue - } - elems = append(elems, ConfigValueFromHCL2Block(ev, &blockS.Block)) - } - ret[name] = elems - - case configschema.NestingMap: - if bv.LengthInt() == 0 { - // skip empty collections to better mimic how HCL1 would behave - continue - } - - elems := make(map[string]interface{}) - for it := bv.ElementIterator(); it.Next(); { - ek, ev := it.Element() - if !ev.IsKnown() { - elems[ek.AsString()] = UnknownVariableValue - continue - } - elems[ek.AsString()] = ConfigValueFromHCL2Block(ev, &blockS.Block) - } - ret[name] = elems - } - } - - return ret -} - -// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic -// types library that HCL2 uses) to a value type that matches what would've -// been produced from the HCL-based interpolator for an equivalent structure. -// -// This function will transform a cty null value into a Go nil value, which -// isn't a possible outcome of the HCL/HIL-based decoder and so callers may -// need to detect and reject any null values. -func ConfigValueFromHCL2(v cty.Value) interface{} { - if !v.IsKnown() { - return UnknownVariableValue - } - if v.IsNull() { - return nil - } - - switch v.Type() { - case cty.Bool: - return v.True() // like HCL.BOOL - case cty.String: - return v.AsString() // like HCL token.STRING or token.HEREDOC - case cty.Number: - // We can't match HCL _exactly_ here because it distinguishes between - // int and float values, but we'll get as close as we can by using - // an int if the number is exactly representable, and a float if not. - // The conversion to float will force precision to that of a float64, - // which is potentially losing information from the specific number - // given, but no worse than what HCL would've done in its own conversion - // to float. - - f := v.AsBigFloat() - if i, acc := f.Int64(); acc == big.Exact { - // if we're on a 32-bit system and the number is too big for 32-bit - // int then we'll fall through here and use a float64. 
- const MaxInt = int(^uint(0) >> 1) - const MinInt = -MaxInt - 1 - if i <= int64(MaxInt) && i >= int64(MinInt) { - return int(i) // Like HCL token.NUMBER - } - } - - f64, _ := f.Float64() - return f64 // like HCL token.FLOAT - } - - if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { - l := make([]interface{}, 0, v.LengthInt()) - it := v.ElementIterator() - for it.Next() { - _, ev := it.Element() - l = append(l, ConfigValueFromHCL2(ev)) - } - return l - } - - if v.Type().IsMapType() || v.Type().IsObjectType() { - l := make(map[string]interface{}) - it := v.ElementIterator() - for it.Next() { - ek, ev := it.Element() - cv := ConfigValueFromHCL2(ev) - if cv != nil { - l[ek.AsString()] = cv - } - } - return l - } - - // If we fall out here then we have some weird type that we haven't - // accounted for. This should never happen unless the caller is using - // capsule types, and we don't currently have any such types defined. - panic(fmt.Errorf("can't convert %#v to config value", v)) -} - -// HCL2ValueFromConfigValue is the opposite of configValueFromHCL2: it takes -// a value as would be returned from the old interpolator and turns it into -// a cty.Value so it can be used within, for example, an HCL2 EvalContext. -func HCL2ValueFromConfigValue(v interface{}) cty.Value { - if v == nil { - return cty.NullVal(cty.DynamicPseudoType) - } - if v == UnknownVariableValue { - return cty.DynamicVal - } - - switch tv := v.(type) { - case bool: - return cty.BoolVal(tv) - case string: - return cty.StringVal(tv) - case int: - return cty.NumberIntVal(int64(tv)) - case float64: - return cty.NumberFloatVal(tv) - case []interface{}: - vals := make([]cty.Value, len(tv)) - for i, ev := range tv { - vals[i] = HCL2ValueFromConfigValue(ev) - } - return cty.TupleVal(vals) - case map[string]interface{}: - vals := map[string]cty.Value{} - for k, ev := range tv { - vals[k] = HCL2ValueFromConfigValue(ev) - } - return cty.ObjectVal(vals) - default: - // HCL/HIL should never generate anything that isn't caught by - // the above, so if we get here something has gone very wrong. - panic(fmt.Errorf("can't convert %#v to cty.Value", v)) - } -} - -func HILVariableFromHCL2Value(v cty.Value) ast.Variable { - if v.IsNull() { - // Caller should guarantee/check this before calling - panic("Null values cannot be represented in HIL") - } - if !v.IsKnown() { - return ast.Variable{ - Type: ast.TypeUnknown, - Value: UnknownVariableValue, - } - } - - switch v.Type() { - case cty.Bool: - return ast.Variable{ - Type: ast.TypeBool, - Value: v.True(), - } - case cty.Number: - v := ConfigValueFromHCL2(v) - switch tv := v.(type) { - case int: - return ast.Variable{ - Type: ast.TypeInt, - Value: tv, - } - case float64: - return ast.Variable{ - Type: ast.TypeFloat, - Value: tv, - } - default: - // should never happen - panic("invalid return value for configValueFromHCL2") - } - case cty.String: - return ast.Variable{ - Type: ast.TypeString, - Value: v.AsString(), - } - } - - if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { - l := make([]ast.Variable, 0, v.LengthInt()) - it := v.ElementIterator() - for it.Next() { - _, ev := it.Element() - l = append(l, HILVariableFromHCL2Value(ev)) - } - // If we were given a tuple then this could actually produce an invalid - // list with non-homogenous types, which we expect to be caught inside - // HIL just like a user-supplied non-homogenous list would be. 
- return ast.Variable{ - Type: ast.TypeList, - Value: l, - } - } - - if v.Type().IsMapType() || v.Type().IsObjectType() { - l := make(map[string]ast.Variable) - it := v.ElementIterator() - for it.Next() { - ek, ev := it.Element() - l[ek.AsString()] = HILVariableFromHCL2Value(ev) - } - // If we were given an object then this could actually produce an invalid - // map with non-homogenous types, which we expect to be caught inside - // HIL just like a user-supplied non-homogenous map would be. - return ast.Variable{ - Type: ast.TypeMap, - Value: l, - } - } - - // If we fall out here then we have some weird type that we haven't - // accounted for. This should never happen unless the caller is using - // capsule types, and we don't currently have any such types defined. - panic(fmt.Errorf("can't convert %#v to HIL variable", v)) -} - -func HCL2ValueFromHILVariable(v ast.Variable) cty.Value { - switch v.Type { - case ast.TypeList: - vals := make([]cty.Value, len(v.Value.([]ast.Variable))) - for i, ev := range v.Value.([]ast.Variable) { - vals[i] = HCL2ValueFromHILVariable(ev) - } - return cty.TupleVal(vals) - case ast.TypeMap: - vals := make(map[string]cty.Value, len(v.Value.(map[string]ast.Variable))) - for k, ev := range v.Value.(map[string]ast.Variable) { - vals[k] = HCL2ValueFromHILVariable(ev) - } - return cty.ObjectVal(vals) - default: - return HCL2ValueFromConfigValue(v.Value) - } -} - -func HCL2TypeForHILType(hilType ast.Type) cty.Type { - switch hilType { - case ast.TypeAny: - return cty.DynamicPseudoType - case ast.TypeUnknown: - return cty.DynamicPseudoType - case ast.TypeBool: - return cty.Bool - case ast.TypeInt: - return cty.Number - case ast.TypeFloat: - return cty.Number - case ast.TypeString: - return cty.String - case ast.TypeList: - return cty.List(cty.DynamicPseudoType) - case ast.TypeMap: - return cty.Map(cty.DynamicPseudoType) - default: - return cty.NilType // equivalent to ast.TypeInvalid - } -} diff --git a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/values_equiv.go b/vendor/github.com/hashicorp/terraform/configs/hcl2shim/values_equiv.go deleted file mode 100644 index 92f0213d..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/values_equiv.go +++ /dev/null @@ -1,214 +0,0 @@ -package hcl2shim - -import ( - "github.com/zclconf/go-cty/cty" -) - -// ValuesSDKEquivalent returns true if both of the given values seem equivalent -// as far as the legacy SDK diffing code would be concerned. -// -// Since SDK diffing is a fuzzy, inexact operation, this function is also -// fuzzy and inexact. It will err on the side of returning false if it -// encounters an ambiguous situation. Ambiguity is most common in the presence -// of sets because in practice it is impossible to exactly correlate -// nonequal-but-equivalent set elements because they have no identity separate -// from their value. -// -// This must be used _only_ for comparing values for equivalence within the -// SDK planning code. It is only meaningful to compare the "prior state" -// provided by Terraform Core with the "planned new state" produced by the -// legacy SDK code via shims. In particular it is not valid to use this -// function with either the config value or the "proposed new state" value -// because they contain only the subset of data that Terraform Core itself is -// able to determine. 
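Before the equivalence helpers below, it is worth seeing the number rule from ConfigValueFromHCL2 above in isolation: an int is used only when the big.Float is exactly representable as one, otherwise the value collapses to float64. A standalone sketch (the sample inputs are arbitrary):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	for _, s := range []string{"42", "3.5"} {
		// Parse at high precision, as cty's big.Float values carry.
		f, _, _ := big.ParseFloat(s, 10, 512, big.ToNearestEven)

		// Exactly representable as an integer? Use an int, like HCL's
		// token.NUMBER; otherwise fall back to float64, like token.FLOAT.
		if i, acc := f.Int64(); acc == big.Exact {
			fmt.Printf("%s -> int %d\n", s, i)
			continue
		}
		f64, _ := f.Float64()
		fmt.Printf("%s -> float64 %v\n", s, f64)
	}
}
```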
-func ValuesSDKEquivalent(a, b cty.Value) bool { - if a == cty.NilVal || b == cty.NilVal { - // We don't generally expect nils to appear, but we'll allow them - // for robustness since the data structures produced by legacy SDK code - // can sometimes be non-ideal. - return a == b // equivalent if they are _both_ nil - } - if a.RawEquals(b) { - // Easy case. We use RawEquals because we want two unknowns to be - // considered equal here, whereas "Equals" would return unknown. - return true - } - if !a.IsKnown() || !b.IsKnown() { - // Two unknown values are equivalent regardless of type. A known is - // never equivalent to an unknown. - return a.IsKnown() == b.IsKnown() - } - if aZero, bZero := valuesSDKEquivalentIsNullOrZero(a), valuesSDKEquivalentIsNullOrZero(b); aZero || bZero { - // Two null/zero values are equivalent regardless of type. A non-zero is - // never equivalent to a zero. - return aZero == bZero - } - - // If we get down here then we are guaranteed that both a and b are known, - // non-null values. - - aTy := a.Type() - bTy := b.Type() - switch { - case aTy.IsSetType() && bTy.IsSetType(): - return valuesSDKEquivalentSets(a, b) - case aTy.IsListType() && bTy.IsListType(): - return valuesSDKEquivalentSequences(a, b) - case aTy.IsTupleType() && bTy.IsTupleType(): - return valuesSDKEquivalentSequences(a, b) - case aTy.IsMapType() && bTy.IsMapType(): - return valuesSDKEquivalentMappings(a, b) - case aTy.IsObjectType() && bTy.IsObjectType(): - return valuesSDKEquivalentMappings(a, b) - case aTy == cty.Number && bTy == cty.Number: - return valuesSDKEquivalentNumbers(a, b) - default: - // We've now covered all the interesting cases, so anything that falls - // down here cannot be equivalent. - return false - } -} - -// valuesSDKEquivalentIsNullOrZero returns true if the given value is either -// null or is the "zero value" (in the SDK/Go sense) for its type. -func valuesSDKEquivalentIsNullOrZero(v cty.Value) bool { - if v == cty.NilVal { - return true - } - - ty := v.Type() - switch { - case !v.IsKnown(): - return false - case v.IsNull(): - return true - - // After this point, v is always known and non-null - case ty.IsListType() || ty.IsSetType() || ty.IsMapType() || ty.IsObjectType() || ty.IsTupleType(): - return v.LengthInt() == 0 - case ty == cty.String: - return v.RawEquals(cty.StringVal("")) - case ty == cty.Number: - return v.RawEquals(cty.Zero) - case ty == cty.Bool: - return v.RawEquals(cty.False) - default: - // The above is exhaustive, but for robustness we'll consider anything - // else to _not_ be zero unless it is null. - return false - } -} - -// valuesSDKEquivalentSets returns true only if each of the elements in a can -// be correlated with at least one equivalent element in b and vice-versa. -// This is a fuzzy operation that prefers to signal non-equivalence if it cannot -// be certain that all elements are accounted for. -func valuesSDKEquivalentSets(a, b cty.Value) bool { - if aLen, bLen := a.LengthInt(), b.LengthInt(); aLen != bLen { - return false - } - - // Our methodology here is a little tricky, to deal with the fact that - // it's impossible to directly correlate two non-equal set elements because - // they don't have identities separate from their values. - // The approach is to count the number of equivalent elements each element - // of a has in b and vice-versa, and then return true only if each element - // in both sets has at least one equivalent. 
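Before the real implementation (which follows), here is a self-contained toy rendering of that counting strategy, with plain ints standing in for cty values; it deliberately shares the fuzziness described above, since correlation is by value only:

```go
package main

import "fmt"

// setsEquivalent mirrors the double-flag correlation described above:
// every element on each side must have at least one match on the other.
func setsEquivalent(as, bs []int) bool {
	if len(as) != len(bs) {
		return false
	}
	aeqs := make([]bool, len(as))
	beqs := make([]bool, len(bs))
	for ai, av := range as {
		for bi, bv := range bs {
			if av == bv { // ValuesSDKEquivalent in the real code
				aeqs[ai] = true
				beqs[bi] = true
			}
		}
	}
	for _, eq := range aeqs {
		if !eq {
			return false
		}
	}
	for _, eq := range beqs {
		if !eq {
			return false
		}
	}
	return true
}

func main() {
	// Fuzzy by design: these differ as multisets, but every element
	// correlates with an equivalent element on the other side.
	fmt.Println(setsEquivalent([]int{1, 2, 2}, []int{2, 1, 1})) // true

	// 2 and 3 have no partners, so this pair is not equivalent.
	fmt.Println(setsEquivalent([]int{1, 2}, []int{1, 3})) // false
}
```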
- as := a.AsValueSlice() - bs := b.AsValueSlice() - aeqs := make([]bool, len(as)) - beqs := make([]bool, len(bs)) - for ai, av := range as { - for bi, bv := range bs { - if ValuesSDKEquivalent(av, bv) { - aeqs[ai] = true - beqs[bi] = true - } - } - } - - for _, eq := range aeqs { - if !eq { - return false - } - } - for _, eq := range beqs { - if !eq { - return false - } - } - return true -} - -// valuesSDKEquivalentSequences decides equivalence for two sequence values -// (lists or tuples). -func valuesSDKEquivalentSequences(a, b cty.Value) bool { - as := a.AsValueSlice() - bs := b.AsValueSlice() - if len(as) != len(bs) { - return false - } - - for i := range as { - if !ValuesSDKEquivalent(as[i], bs[i]) { - return false - } - } - return true -} - -// valuesSDKEquivalentMappings decides equivalence for two mapping values -// (maps or objects). -func valuesSDKEquivalentMappings(a, b cty.Value) bool { - as := a.AsValueMap() - bs := b.AsValueMap() - if len(as) != len(bs) { - return false - } - - for k, av := range as { - bv, ok := bs[k] - if !ok { - return false - } - if !ValuesSDKEquivalent(av, bv) { - return false - } - } - return true -} - -// valuesSDKEquivalentNumbers decides equivalence for two number values based -// on the fact that the SDK uses int and float64 representations while -// cty (and thus Terraform Core) uses big.Float, and so we expect to lose -// precision in the round-trip. -// -// This does _not_ attempt to allow for an epsilon difference that may be -// caused by accumulated inaccuracy in a float calculation, under the -// expectation that providers generally do not actually do computations on -// floats and instead just pass string representations of them on verbatim -// to remote APIs. A remote API _itself_ may introduce inaccuracy, but that's -// a problem for the provider itself to deal with, based on its knowledge of -// the remote system, e.g. using DiffSuppressFunc. -func valuesSDKEquivalentNumbers(a, b cty.Value) bool { - if a.RawEquals(b) { - return true // easy - } - - af := a.AsBigFloat() - bf := b.AsBigFloat() - - if af.IsInt() != bf.IsInt() { - return false - } - if af.IsInt() && bf.IsInt() { - return false // a.RawEquals(b) test above is good enough for integers - } - - // The SDK supports only int and float64, so if it's not an integer - // we know that only a float64-level of precision can possibly be - // significant. - af64, _ := af.Float64() - bf64, _ := bf.Float64() - return af64 == bf64 -} diff --git a/vendor/github.com/hashicorp/terraform/configs/module.go b/vendor/github.com/hashicorp/terraform/configs/module.go deleted file mode 100644 index bc517078..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/module.go +++ /dev/null @@ -1,518 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/experiments" -) - -// Module is a container for a set of configuration constructs that are -// evaluated within a common namespace. -type Module struct { - // SourceDir is the filesystem directory that the module was loaded from. - // - // This is populated automatically only for configurations loaded with - // LoadConfigDir. If the parser is using a virtual filesystem then the - // path here will be in terms of that virtual filesystem. - // - // Any other caller that constructs a module directly with NewModule may - // assign a suitable value to this attribute before using it for other - // purposes.
It should be treated as immutable by all consumers of Module - // values. - SourceDir string - - CoreVersionConstraints []VersionConstraint - - ActiveExperiments experiments.Set - - Backend *Backend - ProviderConfigs map[string]*Provider - ProviderRequirements *RequiredProviders - ProviderLocalNames map[addrs.Provider]string - ProviderMetas map[addrs.Provider]*ProviderMeta - - Variables map[string]*Variable - Locals map[string]*Local - Outputs map[string]*Output - - ModuleCalls map[string]*ModuleCall - - ManagedResources map[string]*Resource - DataResources map[string]*Resource -} - -// File describes the contents of a single configuration file. -// -// Individual files are not usually used alone, but rather combined together -// with other files (conventionally, those in the same directory) to produce -// a *Module, using NewModule. -// -// At the level of an individual file we represent directly the structural -// elements present in the file, without any attempt to detect conflicting -// declarations. A File object can therefore be used for some basic static -// analysis of individual elements, but must be built into a Module to detect -// duplicate declarations. -type File struct { - CoreVersionConstraints []VersionConstraint - - ActiveExperiments experiments.Set - - Backends []*Backend - ProviderConfigs []*Provider - ProviderMetas []*ProviderMeta - RequiredProviders []*RequiredProviders - - Variables []*Variable - Locals []*Local - Outputs []*Output - - ModuleCalls []*ModuleCall - - ManagedResources []*Resource - DataResources []*Resource -} - -// NewModule takes a list of primary files and a list of override files and -// produces a *Module by combining the files together. -// -// If there are any conflicting declarations in the given files -- for example, -// if the same variable name is defined twice -- then the resulting module -// will be incomplete and error diagnostics will be returned. Careful static -// analysis of the returned Module is still possible in this case, but the -// module will probably not be semantically valid. -func NewModule(primaryFiles, overrideFiles []*File) (*Module, hcl.Diagnostics) { - var diags hcl.Diagnostics - mod := &Module{ - ProviderConfigs: map[string]*Provider{}, - ProviderLocalNames: map[addrs.Provider]string{}, - Variables: map[string]*Variable{}, - Locals: map[string]*Local{}, - Outputs: map[string]*Output{}, - ModuleCalls: map[string]*ModuleCall{}, - ManagedResources: map[string]*Resource{}, - DataResources: map[string]*Resource{}, - ProviderMetas: map[addrs.Provider]*ProviderMeta{}, - } - - // Process the required_providers blocks first, to ensure that all - // resources have access to the correct provider FQNs - for _, file := range primaryFiles { - for _, r := range file.RequiredProviders { - if mod.ProviderRequirements != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate required providers configuration", - Detail: fmt.Sprintf("A module may have only one required providers configuration. 
The required providers were previously configured at %s.", mod.ProviderRequirements.DeclRange), - Subject: &r.DeclRange, - }) - continue - } - mod.ProviderRequirements = r - } - } - - // If no required_providers block is configured, create a useful empty - // state to reduce nil checks elsewhere - if mod.ProviderRequirements == nil { - mod.ProviderRequirements = &RequiredProviders{ - RequiredProviders: make(map[string]*RequiredProvider), - } - } - - // Any required_providers blocks in override files replace the entire - // block for each provider - for _, file := range overrideFiles { - for _, override := range file.RequiredProviders { - for name, rp := range override.RequiredProviders { - mod.ProviderRequirements.RequiredProviders[name] = rp - } - } - } - - for _, file := range primaryFiles { - fileDiags := mod.appendFile(file) - diags = append(diags, fileDiags...) - } - - for _, file := range overrideFiles { - fileDiags := mod.mergeFile(file) - diags = append(diags, fileDiags...) - } - - diags = append(diags, checkModuleExperiments(mod)...) - - // Generate the FQN -> LocalProviderName map - mod.gatherProviderLocalNames() - - return mod, diags -} - -// ResourceByAddr returns the configuration for the resource with the given -// address, or nil if there is no such resource. -func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource { - key := addr.String() - switch addr.Mode { - case addrs.ManagedResourceMode: - return m.ManagedResources[key] - case addrs.DataResourceMode: - return m.DataResources[key] - default: - return nil - } -} - -func (m *Module) appendFile(file *File) hcl.Diagnostics { - var diags hcl.Diagnostics - - for _, constraint := range file.CoreVersionConstraints { - // If there are any conflicting requirements then we'll catch them - // when we actually check these constraints. - m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) - } - - m.ActiveExperiments = experiments.SetUnion(m.ActiveExperiments, file.ActiveExperiments) - - for _, b := range file.Backends { - if m.Backend != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate backend configuration", - Detail: fmt.Sprintf("A module may have only one backend configuration. The backend was previously configured at %s.", m.Backend.DeclRange), - Subject: &b.DeclRange, - }) - continue - } - m.Backend = b - } - - for _, pc := range file.ProviderConfigs { - key := pc.moduleUniqueKey() - if existing, exists := m.ProviderConfigs[key]; exists { - if existing.Alias == "" { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider configuration", - Detail: fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange), - Subject: &pc.DeclRange, - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider configuration", - Detail: fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. 
Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange), - Subject: &pc.DeclRange, - }) - } - continue - } - m.ProviderConfigs[key] = pc - } - - for _, pm := range file.ProviderMetas { - provider := m.ProviderForLocalConfig(addrs.LocalProviderConfig{LocalName: pm.Provider}) - if existing, exists := m.ProviderMetas[provider]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider_meta block", - Detail: fmt.Sprintf("A provider_meta block for provider %q was already declared at %s. Providers may only have one provider_meta block per module.", existing.Provider, existing.DeclRange), - Subject: &pm.DeclRange, - }) - } - m.ProviderMetas[provider] = pm - } - - for _, v := range file.Variables { - if existing, exists := m.Variables[v.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate variable declaration", - Detail: fmt.Sprintf("A variable named %q was already declared at %s. Variable names must be unique within a module.", existing.Name, existing.DeclRange), - Subject: &v.DeclRange, - }) - } - m.Variables[v.Name] = v - } - - for _, l := range file.Locals { - if existing, exists := m.Locals[l.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate local value definition", - Detail: fmt.Sprintf("A local value named %q was already defined at %s. Local value names must be unique within a module.", existing.Name, existing.DeclRange), - Subject: &l.DeclRange, - }) - } - m.Locals[l.Name] = l - } - - for _, o := range file.Outputs { - if existing, exists := m.Outputs[o.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate output definition", - Detail: fmt.Sprintf("An output named %q was already defined at %s. Output names must be unique within a module.", existing.Name, existing.DeclRange), - Subject: &o.DeclRange, - }) - } - m.Outputs[o.Name] = o - } - - for _, mc := range file.ModuleCalls { - if existing, exists := m.ModuleCalls[mc.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate module call", - Detail: fmt.Sprintf("A module call named %q was already defined at %s. Module calls must have unique names within a module.", existing.Name, existing.DeclRange), - Subject: &mc.DeclRange, - }) - } - m.ModuleCalls[mc.Name] = mc - } - - for _, r := range file.ManagedResources { - key := r.moduleUniqueKey() - if existing, exists := m.ManagedResources[key]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate resource %q configuration", existing.Type), - Detail: fmt.Sprintf("A %s resource named %q was already declared at %s.
Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), - Subject: &r.DeclRange, - }) - continue - } - m.ManagedResources[key] = r - - // set the provider FQN for the resource - if r.ProviderConfigRef != nil { - r.Provider = m.ProviderForLocalConfig(r.ProviderConfigAddr()) - } else { - r.Provider = m.ImpliedProviderForUnqualifiedType(r.Addr().ImpliedProvider()) - } - } - - for _, r := range file.DataResources { - key := r.moduleUniqueKey() - if existing, exists := m.DataResources[key]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type), - Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), - Subject: &r.DeclRange, - }) - continue - } - m.DataResources[key] = r - - // set the provider FQN for the resource - if r.ProviderConfigRef != nil { - r.Provider = m.ProviderForLocalConfig(r.ProviderConfigAddr()) - } else { - r.Provider = m.ImpliedProviderForUnqualifiedType(r.Addr().ImpliedProvider()) - } - } - - return diags -} - -func (m *Module) mergeFile(file *File) hcl.Diagnostics { - var diags hcl.Diagnostics - - if len(file.CoreVersionConstraints) != 0 { - // This is a bit of a strange case for overriding since we normally - // would union together across multiple files anyway, but we'll - // allow it and have each override file clobber any existing list. - m.CoreVersionConstraints = nil - for _, constraint := range file.CoreVersionConstraints { - m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) - } - } - - if len(file.Backends) != 0 { - switch len(file.Backends) { - case 1: - m.Backend = file.Backends[0] - default: - // An override file with multiple backends is still invalid, even - // though it can override backends from _other_ files. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate backend configuration", - Detail: fmt.Sprintf("Each override file may have only one backend configuration. A backend was previously configured at %s.", file.Backends[0].DeclRange), - Subject: &file.Backends[1].DeclRange, - }) - } - } - - for _, pc := range file.ProviderConfigs { - key := pc.moduleUniqueKey() - existing, exists := m.ProviderConfigs[key] - if pc.Alias == "" { - // We allow overriding a non-existing _default_ provider configuration - // because the user model is that an absent provider configuration - // implies an empty provider configuration, which is what the user - // is therefore overriding here. - if exists { - mergeDiags := existing.merge(pc) - diags = append(diags, mergeDiags...) - } else { - m.ProviderConfigs[key] = pc - } - } else { - // For aliased providers, there must be a base configuration to - // override. This allows us to detect and report alias typos - // that might otherwise cause the override to not apply. - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base provider configuration for override", - Detail: fmt.Sprintf("There is no %s provider configuration with the alias %q. An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias), - Subject: &pc.DeclRange, - }) - continue - } - mergeDiags := existing.merge(pc) - diags = append(diags, mergeDiags...) 
- } - } - - for _, v := range file.Variables { - existing, exists := m.Variables[v.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base variable declaration to override", - Detail: fmt.Sprintf("There is no variable named %q. An override file can only override a variable that was already declared in a primary configuration file.", v.Name), - Subject: &v.DeclRange, - }) - continue - } - mergeDiags := existing.merge(v) - diags = append(diags, mergeDiags...) - } - - for _, l := range file.Locals { - existing, exists := m.Locals[l.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base local value definition to override", - Detail: fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name), - Subject: &l.DeclRange, - }) - continue - } - mergeDiags := existing.merge(l) - diags = append(diags, mergeDiags...) - } - - for _, o := range file.Outputs { - existing, exists := m.Outputs[o.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base output definition to override", - Detail: fmt.Sprintf("There is no output named %q. An override file can only override an output that was already defined in a primary configuration file.", o.Name), - Subject: &o.DeclRange, - }) - continue - } - mergeDiags := existing.merge(o) - diags = append(diags, mergeDiags...) - } - - for _, mc := range file.ModuleCalls { - existing, exists := m.ModuleCalls[mc.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing module call to override", - Detail: fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name), - Subject: &mc.DeclRange, - }) - continue - } - mergeDiags := existing.merge(mc) - diags = append(diags, mergeDiags...) - } - - for _, r := range file.ManagedResources { - key := r.moduleUniqueKey() - existing, exists := m.ManagedResources[key] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing resource to override", - Detail: fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name), - Subject: &r.DeclRange, - }) - continue - } - mergeDiags := existing.merge(r, m.ProviderRequirements.RequiredProviders) - diags = append(diags, mergeDiags...) - } - - for _, r := range file.DataResources { - key := r.moduleUniqueKey() - existing, exists := m.DataResources[key] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing data resource to override", - Detail: fmt.Sprintf("There is no %s data resource named %q. An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name), - Subject: &r.DeclRange, - }) - continue - } - mergeDiags := existing.merge(r, m.ProviderRequirements.RequiredProviders) - diags = append(diags, mergeDiags...) - } - - return diags -} - -// gatherProviderLocalNames is a helper function that populates a map of -// provider FQNs -> provider local names. This information is useful for -// user-facing output, which should include both the FQN and LocalName. It must -// only be populated after the module has been parsed.
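The inversion this helper performs is a plain map flip. A hypothetical sketch with string keys standing in for addrs.Provider values; the names and FQNs below are illustrative, not taken from this diff:

```go
package main

import "fmt"

func main() {
	// Stand-in for RequiredProviders: local name -> provider FQN.
	required := map[string]string{
		"aws":    "registry.terraform.io/hashicorp/aws",
		"mycorp": "registry.terraform.io/mycorp/aws",
	}

	// Flip to FQN -> local name, as gatherProviderLocalNames does.
	localNames := make(map[string]string, len(required))
	for local, fqn := range required {
		localNames[fqn] = local
	}

	fmt.Println(localNames["registry.terraform.io/hashicorp/aws"]) // aws
}
```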
-func (m *Module) gatherProviderLocalNames() { - providers := make(map[addrs.Provider]string) - for k, v := range m.ProviderRequirements.RequiredProviders { - providers[v.Type] = k - } - m.ProviderLocalNames = providers -} - -// LocalNameForProvider returns the module-specific user-supplied local name for -// a given provider FQN, or the default local name if none was supplied. -func (m *Module) LocalNameForProvider(p addrs.Provider) string { - if existing, exists := m.ProviderLocalNames[p]; exists { - return existing - } else { - // If there isn't a map entry, fall back to the default: - // Type = LocalName - return p.Type - } -} - -// ProviderForLocalConfig returns the provider FQN for a given -// LocalProviderConfig, based on its local name. -func (m *Module) ProviderForLocalConfig(pc addrs.LocalProviderConfig) addrs.Provider { - return m.ImpliedProviderForUnqualifiedType(pc.LocalName) -} - -// ImpliedProviderForUnqualifiedType returns the provider FQN for a given type, -// first by looking up the type in the provider requirements map, and falling -// back to an implied default provider. -// -// The intended behaviour is that configuring a provider with local name "foo" -// in a required_providers block will result in resources with type "foo" using -// that provider. -func (m *Module) ImpliedProviderForUnqualifiedType(pType string) addrs.Provider { - if provider, exists := m.ProviderRequirements.RequiredProviders[pType]; exists { - return provider.Type - } - return addrs.ImpliedProviderForUnqualifiedType(pType) -} diff --git a/vendor/github.com/hashicorp/terraform/configs/module_call.go b/vendor/github.com/hashicorp/terraform/configs/module_call.go deleted file mode 100644 index 25f04813..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/module_call.go +++ /dev/null @@ -1,173 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// ModuleCall represents a "module" block in a module or file. -type ModuleCall struct { - Name string - - SourceAddr string - SourceAddrRange hcl.Range - SourceSet bool - - Config hcl.Body - - Version VersionConstraint - - Count hcl.Expression - ForEach hcl.Expression - - Providers []PassedProviderConfig - - DependsOn []hcl.Traversal - - DeclRange hcl.Range -} - -func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagnostics) { - mc := &ModuleCall{ - Name: block.Labels[0], - DeclRange: block.DefRange, - } - - schema := moduleBlockSchema - if override { - schema = schemaForOverrides(schema) - } - - content, remain, diags := block.Body.PartialContent(schema) - mc.Config = remain - - if !hclsyntax.ValidIdentifier(mc.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid module instance name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - - if attr, exists := content.Attributes["source"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &mc.SourceAddr) - diags = append(diags, valDiags...) - mc.SourceAddrRange = attr.Expr.Range() - mc.SourceSet = true - } - - if attr, exists := content.Attributes["version"]; exists { - var versionDiags hcl.Diagnostics - mc.Version, versionDiags = decodeVersionConstraint(attr) - diags = append(diags, versionDiags...) 
- } - - if attr, exists := content.Attributes["count"]; exists { - mc.Count = attr.Expr - } - - if attr, exists := content.Attributes["for_each"]; exists { - if mc.Count != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid combination of "count" and "for_each"`, - Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, - Subject: &attr.NameRange, - }) - } - - mc.ForEach = attr.Expr - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - mc.DependsOn = append(mc.DependsOn, deps...) - } - - if attr, exists := content.Attributes["providers"]; exists { - seen := make(map[string]hcl.Range) - pairs, pDiags := hcl.ExprMap(attr.Expr) - diags = append(diags, pDiags...) - for _, pair := range pairs { - key, keyDiags := decodeProviderConfigRef(pair.Key, "providers") - diags = append(diags, keyDiags...) - value, valueDiags := decodeProviderConfigRef(pair.Value, "providers") - diags = append(diags, valueDiags...) - if keyDiags.HasErrors() || valueDiags.HasErrors() { - continue - } - - matchKey := key.String() - if prev, exists := seen[matchKey]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider address", - Detail: fmt.Sprintf("A provider configuration was already passed to %s at %s. Each child provider configuration can be assigned only once.", matchKey, prev), - Subject: pair.Value.Range().Ptr(), - }) - continue - } - - rng := hcl.RangeBetween(pair.Key.Range(), pair.Value.Range()) - seen[matchKey] = rng - mc.Providers = append(mc.Providers, PassedProviderConfig{ - InChild: key, - InParent: value, - }) - } - } - - // Reserved block types (all of them) - for _, block := range content.Blocks { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in module block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - - return mc, diags -} - -// PassedProviderConfig represents a provider config explicitly passed down to -// a child module, possibly giving it a new local address in the process. -type PassedProviderConfig struct { - InChild *ProviderConfigRef - InParent *ProviderConfigRef -} - -var moduleBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "source", - Required: true, - }, - { - Name: "version", - }, - { - Name: "count", - }, - { - Name: "for_each", - }, - { - Name: "depends_on", - }, - { - Name: "providers", - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - // These are all reserved for future use. - {Type: "lifecycle"}, - {Type: "locals"}, - {Type: "provider", LabelNames: []string{"type"}}, - }, -} diff --git a/vendor/github.com/hashicorp/terraform/configs/module_merge.go b/vendor/github.com/hashicorp/terraform/configs/module_merge.go deleted file mode 100644 index bf3fd8e4..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/module_merge.go +++ /dev/null @@ -1,244 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// The methods in this file are used by Module.mergeFile to apply overrides -// to our different configuration elements. 
These methods all follow the -// pattern of mutating the receiver to incorporate settings from the parameter, -// returning error diagnostics if any aspect of the parameter cannot be merged -// into the receiver for some reason. -// -// User expectation is that anything _explicitly_ set in the given object -// should take precedence over the corresponding settings in the receiver, -// but that anything omitted in the given object should be left unchanged. -// In some cases it may be reasonable to do a "deep merge" of certain nested -// features, if it is possible to unambiguously correlate the nested elements -// and their behaviors are orthogonal to each other. - -func (p *Provider) merge(op *Provider) hcl.Diagnostics { - var diags hcl.Diagnostics - - if op.Version.Required != nil { - p.Version = op.Version - } - - p.Config = MergeBodies(p.Config, op.Config) - - return diags -} - -func (v *Variable) merge(ov *Variable) hcl.Diagnostics { - var diags hcl.Diagnostics - - if ov.DescriptionSet { - v.Description = ov.Description - v.DescriptionSet = ov.DescriptionSet - } - if ov.Default != cty.NilVal { - v.Default = ov.Default - } - if ov.Type != cty.NilType { - v.Type = ov.Type - } - if ov.ParsingMode != 0 { - v.ParsingMode = ov.ParsingMode - } - - // If the override file overrode type without default or vice-versa then - // it may have created an invalid situation, which we'll catch now by - // attempting to re-convert the value. - // - // Note that here we may be re-converting an already-converted base value - // from the base config. This will be a no-op if the type was not changed, - // but in particular might be user-observable in the edge case where the - // literal value in config could've been converted to the overridden type - // constraint but the converted value cannot. In practice, this situation - // should be rare since most of our conversions are interchangeable. - if v.Default != cty.NilVal { - val, err := convert.Convert(v.Default, v.Type) - if err != nil { - // What exactly we'll say in the error message here depends on whether - // it was Default or Type that was overridden here. - switch { - case ov.Type != cty.NilType && ov.Default == cty.NilVal: - // If only the type was overridden - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for variable", - Detail: fmt.Sprintf("Overriding this variable's type constraint has made its default value invalid: %s.", err), - Subject: &ov.DeclRange, - }) - case ov.Type == cty.NilType && ov.Default != cty.NilVal: - // Only the default was overridden - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for variable", - Detail: fmt.Sprintf("The overridden default value for this variable is not compatible with the variable's type constraint: %s.", err), - Subject: &ov.DeclRange, - }) - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for variable", - Detail: fmt.Sprintf("This variable's default value is not compatible with its type constraint: %s.", err), - Subject: &ov.DeclRange, - }) - } - } else { - v.Default = val - } - } - - return diags -} - -func (l *Local) merge(ol *Local) hcl.Diagnostics { - var diags hcl.Diagnostics - - // Since a local is just a single expression in configuration, the - // override definition entirely replaces the base definition, including - // the source range so that we'll send the user to the right place if - // there is an error.
- l.Expr = ol.Expr - l.DeclRange = ol.DeclRange - - return diags -} - -func (o *Output) merge(oo *Output) hcl.Diagnostics { - var diags hcl.Diagnostics - - if oo.Description != "" { - o.Description = oo.Description - } - if oo.Expr != nil { - o.Expr = oo.Expr - } - if oo.SensitiveSet { - o.Sensitive = oo.Sensitive - o.SensitiveSet = oo.SensitiveSet - } - - // We don't allow depends_on to be overridden because that is likely to - // cause confusing misbehavior. - if len(oo.DependsOn) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported override", - Detail: "The depends_on argument may not be overridden.", - Subject: oo.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have - }) - } - - return diags -} - -func (mc *ModuleCall) merge(omc *ModuleCall) hcl.Diagnostics { - var diags hcl.Diagnostics - - if omc.SourceSet { - mc.SourceAddr = omc.SourceAddr - mc.SourceAddrRange = omc.SourceAddrRange - mc.SourceSet = omc.SourceSet - } - - if omc.Count != nil { - mc.Count = omc.Count - } - - if omc.ForEach != nil { - mc.ForEach = omc.ForEach - } - - if len(omc.Version.Required) != 0 { - mc.Version = omc.Version - } - - mc.Config = MergeBodies(mc.Config, omc.Config) - - // We don't allow depends_on to be overridden because that is likely to - // cause confusing misbehavior. - if len(mc.DependsOn) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported override", - Detail: "The depends_on argument may not be overridden.", - Subject: mc.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have - }) - } - - return diags -} - -func (r *Resource) merge(or *Resource, rps map[string]*RequiredProvider) hcl.Diagnostics { - var diags hcl.Diagnostics - - if r.Mode != or.Mode { - // This is always a programming error, since managed and data resources - // are kept in separate maps in the configuration structures. - panic(fmt.Errorf("can't merge %s into %s", or.Mode, r.Mode)) - } - - if or.Count != nil { - r.Count = or.Count - } - if or.ForEach != nil { - r.ForEach = or.ForEach - } - - if or.ProviderConfigRef != nil { - r.ProviderConfigRef = or.ProviderConfigRef - if existing, exists := rps[or.ProviderConfigRef.Name]; exists { - r.Provider = existing.Type - } else { - r.Provider = addrs.ImpliedProviderForUnqualifiedType(r.ProviderConfigRef.Name) - } - } - - // Provider FQN is set by Terraform during Merge - - if r.Mode == addrs.ManagedResourceMode { - // or.Managed is always non-nil for managed resource mode - - if or.Managed.Connection != nil { - r.Managed.Connection = or.Managed.Connection - } - if or.Managed.CreateBeforeDestroySet { - r.Managed.CreateBeforeDestroy = or.Managed.CreateBeforeDestroy - r.Managed.CreateBeforeDestroySet = or.Managed.CreateBeforeDestroySet - } - if len(or.Managed.IgnoreChanges) != 0 { - r.Managed.IgnoreChanges = or.Managed.IgnoreChanges - } - if or.Managed.PreventDestroySet { - r.Managed.PreventDestroy = or.Managed.PreventDestroy - r.Managed.PreventDestroySet = or.Managed.PreventDestroySet - } - if len(or.Managed.Provisioners) != 0 { - r.Managed.Provisioners = or.Managed.Provisioners - } - } - - r.Config = MergeBodies(r.Config, or.Config) - - // We don't allow depends_on to be overridden because that is likely to - // cause confusing misbehavior. 
- if len(or.DependsOn) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported override", - Detail: "The depends_on argument may not be overridden.", - Subject: or.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have - }) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go b/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go deleted file mode 100644 index 7b51eae8..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go +++ /dev/null @@ -1,143 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" -) - -// MergeBodies creates a new HCL body that contains a combination of the -// given base and override bodies. Attributes and blocks defined in the -// override body take precedence over those of the same name defined in -// the base body. -// -// If any block of a particular type appears in "override" then it will -// replace _all_ of the blocks of the same type in "base" in the new -// body. -func MergeBodies(base, override hcl.Body) hcl.Body { - return mergeBody{ - Base: base, - Override: override, - } -} - -// mergeBody is a hcl.Body implementation that wraps a pair of other bodies -// and allows attributes and blocks within the override to take precedence -// over those defined in the base body. -// -// This is used to deal with dynamically-processed bodies in Module.mergeFile. -// It uses a shallow-only merging strategy where direct attributes defined -// in Override will override attributes of the same name in Base, while any -// blocks defined in Override will hide all blocks of the same type in Base. -// -// This cannot possibly "do the right thing" in all cases, because we don't -// have enough information about user intent. However, this behavior is intended -// to be reasonable for simple overriding use-cases. -type mergeBody struct { - Base hcl.Body - Override hcl.Body -} - -var _ hcl.Body = mergeBody{} - -func (b mergeBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - var diags hcl.Diagnostics - baseSchema := schemaWithDynamic(schema) - overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) - - baseContent, _, cDiags := b.Base.PartialContent(baseSchema) - diags = append(diags, cDiags...) - overrideContent, _, cDiags := b.Override.PartialContent(overrideSchema) - diags = append(diags, cDiags...) - - content := b.prepareContent(baseContent, overrideContent) - - return content, diags -} - -func (b mergeBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - var diags hcl.Diagnostics - baseSchema := schemaWithDynamic(schema) - overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) - - baseContent, baseRemain, cDiags := b.Base.PartialContent(baseSchema) - diags = append(diags, cDiags...) - overrideContent, overrideRemain, cDiags := b.Override.PartialContent(overrideSchema) - diags = append(diags, cDiags...) - - content := b.prepareContent(baseContent, overrideContent) - - remain := MergeBodies(baseRemain, overrideRemain) - - return content, remain, diags -} - -func (b mergeBody) prepareContent(base *hcl.BodyContent, override *hcl.BodyContent) *hcl.BodyContent { - content := &hcl.BodyContent{ - Attributes: make(hcl.Attributes), - } - - // For attributes we just assign from each map in turn and let the override - // map clobber any matching entries from base. 
- for k, a := range base.Attributes { - content.Attributes[k] = a - } - for k, a := range override.Attributes { - content.Attributes[k] = a - } - - // Things are a little more interesting for blocks because they arrive - // as a flat list. Our merging semantics call for us to suppress blocks - // from base if at least one block of the same type appears in override. - // We explicitly do not try to correlate and deeply merge nested blocks, - // since we don't have enough context here to infer user intent. - - overriddenBlockTypes := make(map[string]bool) - for _, block := range override.Blocks { - if block.Type == "dynamic" { - overriddenBlockTypes[block.Labels[0]] = true - continue - } - overriddenBlockTypes[block.Type] = true - } - for _, block := range base.Blocks { - // We skip over dynamic blocks whose type label is an overridden type - // but note that below we do still leave them as dynamic blocks in - // the result because expanding the dynamic blocks that are left is - // done much later during the core graph walks, where we can safely - // evaluate the expressions. - if block.Type == "dynamic" && overriddenBlockTypes[block.Labels[0]] { - continue - } - if overriddenBlockTypes[block.Type] { - continue - } - content.Blocks = append(content.Blocks, block) - } - for _, block := range override.Blocks { - content.Blocks = append(content.Blocks, block) - } - - return content -} - -func (b mergeBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - var diags hcl.Diagnostics - ret := make(hcl.Attributes) - - baseAttrs, aDiags := b.Base.JustAttributes() - diags = append(diags, aDiags...) - overrideAttrs, aDiags := b.Override.JustAttributes() - diags = append(diags, aDiags...) - - for k, a := range baseAttrs { - ret[k] = a - } - for k, a := range overrideAttrs { - ret[k] = a - } - - return ret, diags -} - -func (b mergeBody) MissingItemRange() hcl.Range { - return b.Base.MissingItemRange() -} diff --git a/vendor/github.com/hashicorp/terraform/configs/named_values.go b/vendor/github.com/hashicorp/terraform/configs/named_values.go deleted file mode 100644 index 128bd278..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/named_values.go +++ /dev/null @@ -1,574 +0,0 @@ -package configs - -import ( - "fmt" - "unicode" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/typeexpr" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform/addrs" -) - -// A consistent detail message for all "not a valid identifier" diagnostics. -const badIdentifierDetail = "A name must start with a letter or underscore and may contain only letters, digits, underscores, and dashes." - -// Variable represents a "variable" block in a module or file. -type Variable struct { - Name string - Description string - Default cty.Value - Type cty.Type - ParsingMode VariableParsingMode - Validations []*VariableValidation - - DescriptionSet bool - - DeclRange hcl.Range -} - -func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagnostics) { - v := &Variable{ - Name: block.Labels[0], - DeclRange: block.DefRange, - } - - // Unless we're building an override, we'll set some defaults - // which we might override with attributes below. We leave these - // as zero-value in the override case so we can recognize whether - // or not they are set when we merge. 
- if !override { - v.Type = cty.DynamicPseudoType - v.ParsingMode = VariableParseLiteral - } - - content, diags := block.Body.Content(variableBlockSchema) - - if !hclsyntax.ValidIdentifier(v.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid variable name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - - // Don't allow declaration of variables that would conflict with the - // reserved attribute and block type names in a "module" block, since - // these won't be usable for child modules. - for _, attr := range moduleBlockSchema.Attributes { - if attr.Name == v.Name { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid variable name", - Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", attr.Name), - Subject: &block.LabelRanges[0], - }) - } - } - for _, blockS := range moduleBlockSchema.Blocks { - if blockS.Type == v.Name { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid variable name", - Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", blockS.Type), - Subject: &block.LabelRanges[0], - }) - } - } - - if attr, exists := content.Attributes["description"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &v.Description) - diags = append(diags, valDiags...) - v.DescriptionSet = true - } - - if attr, exists := content.Attributes["type"]; exists { - ty, parseMode, tyDiags := decodeVariableType(attr.Expr) - diags = append(diags, tyDiags...) - v.Type = ty - v.ParsingMode = parseMode - } - - if attr, exists := content.Attributes["default"]; exists { - val, valDiags := attr.Expr.Value(nil) - diags = append(diags, valDiags...) - - // Convert the default to the expected type so we can catch invalid - // defaults early and allow later code to assume validity. - // Note that this depends on us having already processed any "type" - // attribute above. - // However, we can't do this if we're in an override file where - // the type might not be set; we'll catch that during merge. - if v.Type != cty.NilType { - var err error - val, err = convert.Convert(val, v.Type) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for variable", - Detail: fmt.Sprintf("This default value is not compatible with the variable's type constraint: %s.", err), - Subject: attr.Expr.Range().Ptr(), - }) - val = cty.DynamicVal - } - } - - v.Default = val - } - - for _, block := range content.Blocks { - switch block.Type { - - case "validation": - vv, moreDiags := decodeVariableValidationBlock(v.Name, block, override) - diags = append(diags, moreDiags...) - v.Validations = append(v.Validations, vv) - - default: - // The above cases should be exhaustive for all block types - // defined in variableBlockSchema - panic(fmt.Sprintf("unhandled block type %q", block.Type)) - } - } - - return v, diags -} - -func decodeVariableType(expr hcl.Expression) (cty.Type, VariableParsingMode, hcl.Diagnostics) { - if exprIsNativeQuotedString(expr) { - // Here we're accepting the pre-0.12 form of variable type argument where - // the string values "string", "list" and "map" are accepted as a hint - // about the type used primarily for deciding how to parse values - // given on the command line and in environment variables.
- // Only the native syntax ends up in this codepath; we handle the - // JSON syntax (which is, of course, quoted even in the new format) - // in the normal codepath below. - val, diags := expr.Value(nil) - if diags.HasErrors() { - return cty.DynamicPseudoType, VariableParseHCL, diags - } - str := val.AsString() - switch str { - case "string": - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Quoted type constraints are deprecated", - Detail: "Terraform 0.11 and earlier required type constraints to be given in quotes, but that form is now deprecated and will be removed in a future version of Terraform. To silence this warning, remove the quotes around \"string\".", - Subject: expr.Range().Ptr(), - }) - return cty.String, VariableParseLiteral, diags - case "list": - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Quoted type constraints are deprecated", - Detail: "Terraform 0.11 and earlier required type constraints to be given in quotes, but that form is now deprecated and will be removed in a future version of Terraform. To silence this warning, remove the quotes around \"list\" and write list(string) instead to explicitly indicate that the list elements are strings.", - Subject: expr.Range().Ptr(), - }) - return cty.List(cty.DynamicPseudoType), VariableParseHCL, diags - case "map": - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Quoted type constraints are deprecated", - Detail: "Terraform 0.11 and earlier required type constraints to be given in quotes, but that form is now deprecated and will be removed in a future version of Terraform. To silence this warning, remove the quotes around \"map\" and write map(string) instead to explicitly indicate that the map elements are strings.", - Subject: expr.Range().Ptr(), - }) - return cty.Map(cty.DynamicPseudoType), VariableParseHCL, diags - default: - return cty.DynamicPseudoType, VariableParseHCL, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: "Invalid legacy variable type hint", - Detail: `The legacy variable type hint form, using a quoted string, allows only the values "string", "list", and "map". To provide a full type expression, remove the surrounding quotes and give the type expression directly.`, - Subject: expr.Range().Ptr(), - }} - } - } - - // First we'll deal with some shorthand forms that the HCL-level type - // expression parser doesn't include. These both emulate pre-0.12 behavior - // of allowing a list or map of any element type as long as all of the - // elements are consistent. This is the same as list(any) or map(any). - switch hcl.ExprAsKeyword(expr) { - case "list": - return cty.List(cty.DynamicPseudoType), VariableParseHCL, nil - case "map": - return cty.Map(cty.DynamicPseudoType), VariableParseHCL, nil - } - - ty, diags := typeexpr.TypeConstraint(expr) - if diags.HasErrors() { - return cty.DynamicPseudoType, VariableParseHCL, diags - } - - switch { - case ty.IsPrimitiveType(): - // Primitive types use literal parsing. - return ty, VariableParseLiteral, diags - default: - // Everything else uses HCL parsing - return ty, VariableParseHCL, diags - } -} - -// Required returns true if this variable is required to be set by the caller, -// or false if there is a default value that will be used when it isn't set. 
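Stepping back to decodeVariableType above: for the modern, non-legacy path, the heavy lifting is done by the typeexpr extension. A minimal sketch, assuming only the public hclsyntax and typeexpr packages (the filename is illustrative):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/typeexpr"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	// Parse a variable "type" argument as a native HCL expression...
	expr, diags := hclsyntax.ParseExpression([]byte("map(string)"), "types.tf", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// ...then interpret it as a type constraint.
	ty, diags := typeexpr.TypeConstraint(expr)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(ty.FriendlyName()) // map of string
}
```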
-func (v *Variable) Required() bool { - return v.Default == cty.NilVal -} - -// VariableParsingMode defines how values of a particular variable given by -// text-only mechanisms (command line arguments and environment variables) -// should be parsed to produce the final value. -type VariableParsingMode rune - -// VariableParseLiteral is a variable parsing mode that just takes the given -// string directly as a cty.String value. -const VariableParseLiteral VariableParsingMode = 'L' - -// VariableParseHCL is a variable parsing mode that attempts to parse the given -// string as an HCL expression and returns the result. -const VariableParseHCL VariableParsingMode = 'H' - -// Parse uses the receiving parsing mode to process the given variable value -// string, returning the result along with any diagnostics. -// -// A VariableParsingMode does not know the expected type of the corresponding -// variable, so it's the caller's responsibility to attempt to convert the -// result to the appropriate type and return to the user any diagnostics that -// conversion may produce. -// -// The given name is used to create a synthetic filename in case any diagnostics -// must be generated about the given string value. This should be the name -// of the root module variable whose value will be populated from the given -// string. -// -// If the returned diagnostics has errors, the returned value may not be -// valid. -func (m VariableParsingMode) Parse(name, value string) (cty.Value, hcl.Diagnostics) { - switch m { - case VariableParseLiteral: - return cty.StringVal(value), nil - case VariableParseHCL: - fakeFilename := fmt.Sprintf("<value for var.%s>", name) - expr, diags := hclsyntax.ParseExpression([]byte(value), fakeFilename, hcl.Pos{Line: 1, Column: 1}) - if diags.HasErrors() { - return cty.DynamicVal, diags - } - val, valDiags := expr.Value(nil) - diags = append(diags, valDiags...) - return val, diags - default: - // Should never happen - panic(fmt.Errorf("Parse called on invalid VariableParsingMode %#v", m)) - } -} - -// VariableValidation represents a configuration-defined validation rule -// for a particular input variable, given as a "validation" block inside -// a "variable" block. -type VariableValidation struct { - // Condition is an expression that refers to the variable being tested - // and contains no other references. The expression must return true - // to indicate that the value is valid or false to indicate that it is - // invalid. If the expression produces an error, that's considered a bug - // in the module defining the validation rule, not an error in the caller. - Condition hcl.Expression - - // ErrorMessage is one or more full sentences, which would need to be in - // English for consistency with the rest of the error message output but - // can in practice be in any language as long as it ends with a period. - // The message should describe what is required for the condition to return - // true in a way that would make sense to a caller of the module. - ErrorMessage string - - DeclRange hcl.Range -} - -func decodeVariableValidationBlock(varName string, block *hcl.Block, override bool) (*VariableValidation, hcl.Diagnostics) { - var diags hcl.Diagnostics - vv := &VariableValidation{ - DeclRange: block.DefRange, - } - - if override { - // For now we'll just forbid overriding validation blocks, to simplify - // the initial design. If we can find a clear use-case for overriding - // validations in override files and there's a way to define it that - // isn't confusing then we could relax this.
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Can't override variable validation rules", - Detail: "Variable \"validation\" blocks cannot be used in override files.", - Subject: vv.DeclRange.Ptr(), - }) - return vv, diags - } - - content, moreDiags := block.Body.Content(variableValidationBlockSchema) - diags = append(diags, moreDiags...) - - if attr, exists := content.Attributes["condition"]; exists { - vv.Condition = attr.Expr - - // The validation condition can only refer to the variable itself, - // to ensure that the variable declaration can't create additional - // edges in the dependency graph. - goodRefs := 0 - for _, traversal := range vv.Condition.Variables() { - ref, moreDiags := addrs.ParseRef(traversal) - if !moreDiags.HasErrors() { - if addr, ok := ref.Subject.(addrs.InputVariable); ok { - if addr.Name == varName { - goodRefs++ - continue // Reference is valid - } - } - } - // If we fall out here then the reference is invalid. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference in variable validation", - Detail: fmt.Sprintf("The condition for variable %q can only refer to the variable itself, using var.%s.", varName, varName), - Subject: traversal.SourceRange().Ptr(), - }) - } - if goodRefs < 1 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid variable validation condition", - Detail: fmt.Sprintf("The condition for variable %q must refer to var.%s in order to test incoming values.", varName, varName), - Subject: attr.Expr.Range().Ptr(), - }) - } - } - - if attr, exists := content.Attributes["error_message"]; exists { - moreDiags := gohcl.DecodeExpression(attr.Expr, nil, &vv.ErrorMessage) - diags = append(diags, moreDiags...) - if !moreDiags.HasErrors() { - const errSummary = "Invalid validation error message" - switch { - case vv.ErrorMessage == "": - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errSummary, - Detail: "An empty string is not a valid nor useful error message.", - Subject: attr.Expr.Range().Ptr(), - }) - case !looksLikeSentences(vv.ErrorMessage): - // Because we're going to include this string verbatim as part - // of a bigger error message written in our usual style in - // English, we'll require the given error message to conform - // to that. We might relax this in future if e.g. we start - // presenting these error messages in a different way, or if - // Terraform starts supporting producing error messages in - // other human languages, etc. - // For pragmatism we also allow sentences ending with - // exclamation points, but we don't mention it explicitly here - // because that's not really consistent with the Terraform UI - // writing style. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errSummary, - Detail: "Validation error message must be at least one full English sentence starting with an uppercase letter and ending with a period or question mark.", - Subject: attr.Expr.Range().Ptr(), - }) - } - } - } - - return vv, diags -} - -// looksLikeSentences is a simple heuristic that encourages writing error -// messages that will be presentable when included as part of a larger -// Terraform error diagnostic whose other text is written in the Terraform -// UI writing style.
-//
-// This is intentionally not a very strong validation since we're assuming
-// that module authors want to write good messages and might just need a nudge
-// about Terraform's specific style, rather than that they are going to try
-// to work around these rules to write a lower-quality message.
-func looksLikeSentences(s string) bool {
-	if len(s) < 1 {
-		return false
-	}
-	runes := []rune(s) // HCL guarantees that all strings are valid UTF-8
-	first := runes[0]
-	last := runes[len(runes)-1]
-
-	// If the first rune is a letter then it must be an uppercase letter.
-	// (This will only see the first rune in a multi-rune combining sequence,
-	// but the first rune is generally the letter if any are, and if not then
-	// we'll just ignore it because we're primarily expecting English messages
-	// right now anyway, for consistency with all of Terraform's other output.)
-	if unicode.IsLetter(first) && !unicode.IsUpper(first) {
-		return false
-	}
-
-	// The string must be at least one full sentence, which implies having
-	// sentence-ending punctuation.
-	// (This assumes that if a sentence ends with quotes then the period
-	// will be outside the quotes, which is consistent with Terraform's UI
-	// writing style.)
-	return last == '.' || last == '?' || last == '!'
-}
-
-// Output represents an "output" block in a module or file.
-type Output struct {
-	Name        string
-	Description string
-	Expr        hcl.Expression
-	DependsOn   []hcl.Traversal
-	Sensitive   bool
-
-	DescriptionSet bool
-	SensitiveSet   bool
-
-	DeclRange hcl.Range
-}
-
-func decodeOutputBlock(block *hcl.Block, override bool) (*Output, hcl.Diagnostics) {
-	o := &Output{
-		Name:      block.Labels[0],
-		DeclRange: block.DefRange,
-	}
-
-	schema := outputBlockSchema
-	if override {
-		schema = schemaForOverrides(schema)
-	}
-
-	content, diags := block.Body.Content(schema)
-
-	if !hclsyntax.ValidIdentifier(o.Name) {
-		diags = append(diags, &hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  "Invalid output name",
-			Detail:   badIdentifierDetail,
-			Subject:  &block.LabelRanges[0],
-		})
-	}
-
-	if attr, exists := content.Attributes["description"]; exists {
-		valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Description)
-		diags = append(diags, valDiags...)
-		o.DescriptionSet = true
-	}
-
-	if attr, exists := content.Attributes["value"]; exists {
-		o.Expr = attr.Expr
-	}
-
-	if attr, exists := content.Attributes["sensitive"]; exists {
-		valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Sensitive)
-		diags = append(diags, valDiags...)
-		o.SensitiveSet = true
-	}
-
-	if attr, exists := content.Attributes["depends_on"]; exists {
-		deps, depsDiags := decodeDependsOn(attr)
-		diags = append(diags, depsDiags...)
-		o.DependsOn = append(o.DependsOn, deps...)
-	}
-
-	return o, diags
-}
-
-// Local represents a single entry from a "locals" block in a module or file.
-// The "locals" block itself is not represented, because it serves only to
-// provide context for us to interpret its contents.
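(Editor's example: sample verdicts from the looksLikeSentences heuristic above.)

    looksLikeSentences("Must be a valid AMI ID.") // true
    looksLikeSentences("must be a valid AMI ID.") // false: lowercase start
    looksLikeSentences("Must be a valid AMI ID")  // false: no ending punctuation
    looksLikeSentences("Sorry!")                  // true: '!' is tolerated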
-type Local struct { - Name string - Expr hcl.Expression - - DeclRange hcl.Range -} - -func decodeLocalsBlock(block *hcl.Block) ([]*Local, hcl.Diagnostics) { - attrs, diags := block.Body.JustAttributes() - if len(attrs) == 0 { - return nil, diags - } - - locals := make([]*Local, 0, len(attrs)) - for name, attr := range attrs { - if !hclsyntax.ValidIdentifier(name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid local value name", - Detail: badIdentifierDetail, - Subject: &attr.NameRange, - }) - } - - locals = append(locals, &Local{ - Name: name, - Expr: attr.Expr, - DeclRange: attr.Range, - }) - } - return locals, diags -} - -// Addr returns the address of the local value declared by the receiver, -// relative to its containing module. -func (l *Local) Addr() addrs.LocalValue { - return addrs.LocalValue{ - Name: l.Name, - } -} - -var variableBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "description", - }, - { - Name: "default", - }, - { - Name: "type", - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "validation", - }, - }, -} - -var variableValidationBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "condition", - Required: true, - }, - { - Name: "error_message", - Required: true, - }, - }, -} - -var outputBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "description", - }, - { - Name: "value", - Required: true, - }, - { - Name: "depends_on", - }, - { - Name: "sensitive", - }, - }, -} diff --git a/vendor/github.com/hashicorp/terraform/configs/parser.go b/vendor/github.com/hashicorp/terraform/configs/parser.go deleted file mode 100644 index 2a621b57..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/parser.go +++ /dev/null @@ -1,100 +0,0 @@ -package configs - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclparse" - "github.com/spf13/afero" -) - -// Parser is the main interface to read configuration files and other related -// files from disk. -// -// It retains a cache of all files that are loaded so that they can be used -// to create source code snippets in diagnostics, etc. -type Parser struct { - fs afero.Afero - p *hclparse.Parser -} - -// NewParser creates and returns a new Parser that reads files from the given -// filesystem. If a nil filesystem is passed then the system's "real" filesystem -// will be used, via afero.OsFs. -func NewParser(fs afero.Fs) *Parser { - if fs == nil { - fs = afero.OsFs{} - } - - return &Parser{ - fs: afero.Afero{Fs: fs}, - p: hclparse.NewParser(), - } -} - -// LoadHCLFile is a low-level method that reads the file at the given path, -// parses it, and returns the hcl.Body representing its root. In many cases -// it is better to use one of the other Load*File methods on this type, -// which additionally decode the root body in some way and return a higher-level -// construct. -// -// If the file cannot be read at all -- e.g. because it does not exist -- then -// this method will return a nil body and error diagnostics. In this case -// callers may wish to ignore the provided error diagnostics and produce -// a more context-sensitive error instead. -// -// The file will be parsed using the HCL native syntax unless the filename -// ends with ".json", in which case the HCL JSON syntax will be used. 
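(Editor's sketch: exercising the Parser above against an in-memory filesystem; the filename and contents are hypothetical.)

    fs := afero.NewMemMapFs()
    _ = afero.WriteFile(fs, "main.tf", []byte(`locals { greeting = "hello" }`), 0644)
    p := NewParser(fs)
    body, diags := p.LoadHCLFile("main.tf")
    // body is the file's root hcl.Body, parsed as native syntax; a path
    // ending in ".json" would have been parsed as HCL JSON instead.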
-func (p *Parser) LoadHCLFile(path string) (hcl.Body, hcl.Diagnostics) { - src, err := p.fs.ReadFile(path) - - if err != nil { - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Failed to read file", - Detail: fmt.Sprintf("The file %q could not be read.", path), - }, - } - } - - var file *hcl.File - var diags hcl.Diagnostics - switch { - case strings.HasSuffix(path, ".json"): - file, diags = p.p.ParseJSON(src, path) - default: - file, diags = p.p.ParseHCL(src, path) - } - - // If the returned file or body is nil, then we'll return a non-nil empty - // body so we'll meet our contract that nil means an error reading the file. - if file == nil || file.Body == nil { - return hcl.EmptyBody(), diags - } - - return file.Body, diags -} - -// Sources returns a map of the cached source buffers for all files that -// have been loaded through this parser, with source filenames (as requested -// when each file was opened) as the keys. -func (p *Parser) Sources() map[string][]byte { - return p.p.Sources() -} - -// ForceFileSource artificially adds source code to the cache of file sources, -// as if it had been loaded from the given filename. -// -// This should be used only in special situations where configuration is loaded -// some other way. Most callers should load configuration via methods of -// Parser, which will update the sources cache automatically. -func (p *Parser) ForceFileSource(filename string, src []byte) { - // We'll make a synthetic hcl.File here just so we can reuse the - // existing cache. - p.p.AddFile(filename, &hcl.File{ - Body: hcl.EmptyBody(), - Bytes: src, - }) -} diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_config.go b/vendor/github.com/hashicorp/terraform/configs/parser_config.go deleted file mode 100644 index 354b96a7..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/parser_config.go +++ /dev/null @@ -1,290 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" -) - -// LoadConfigFile reads the file at the given path and parses it as a config -// file. -// -// If the file cannot be read -- for example, if it does not exist -- then -// a nil *File will be returned along with error diagnostics. Callers may wish -// to disregard the returned diagnostics in this case and instead generate -// their own error message(s) with additional context. -// -// If the returned diagnostics has errors when a non-nil map is returned -// then the map may be incomplete but should be valid enough for careful -// static analysis. -// -// This method wraps LoadHCLFile, and so it inherits the syntax selection -// behaviors documented for that method. -func (p *Parser) LoadConfigFile(path string) (*File, hcl.Diagnostics) { - return p.loadConfigFile(path, false) -} - -// LoadConfigFileOverride is the same as LoadConfigFile except that it relaxes -// certain required attribute constraints in order to interpret the given -// file as an overrides file. -func (p *Parser) LoadConfigFileOverride(path string) (*File, hcl.Diagnostics) { - return p.loadConfigFile(path, true) -} - -func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnostics) { - - body, diags := p.LoadHCLFile(path) - if body == nil { - return nil, diags - } - - file := &File{} - - var reqDiags hcl.Diagnostics - file.CoreVersionConstraints, reqDiags = sniffCoreVersionRequirements(body) - diags = append(diags, reqDiags...) - - // We'll load the experiments first because other decoding logic in the - // loop below might depend on these experiments. 
- var expDiags hcl.Diagnostics - file.ActiveExperiments, expDiags = sniffActiveExperiments(body) - diags = append(diags, expDiags...) - - content, contentDiags := body.Content(configFileSchema) - diags = append(diags, contentDiags...) - - for _, block := range content.Blocks { - switch block.Type { - - case "terraform": - content, contentDiags := block.Body.Content(terraformBlockSchema) - diags = append(diags, contentDiags...) - - // We ignore the "terraform_version" and "experiments" attributes - // here because sniffCoreVersionRequirements and - // sniffActiveExperiments already dealt with those above. - - for _, innerBlock := range content.Blocks { - switch innerBlock.Type { - - case "backend": - backendCfg, cfgDiags := decodeBackendBlock(innerBlock) - diags = append(diags, cfgDiags...) - if backendCfg != nil { - file.Backends = append(file.Backends, backendCfg) - } - - case "required_providers": - reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock) - diags = append(diags, reqsDiags...) - file.RequiredProviders = append(file.RequiredProviders, reqs) - - case "provider_meta": - providerCfg, cfgDiags := decodeProviderMetaBlock(innerBlock) - diags = append(diags, cfgDiags...) - if providerCfg != nil { - file.ProviderMetas = append(file.ProviderMetas, providerCfg) - } - - default: - // Should never happen because the above cases should be exhaustive - // for all block type names in our schema. - continue - - } - } - - case "required_providers": - // required_providers should be nested inside a "terraform" block - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid required_providers block", - Detail: "A \"required_providers\" block must be nested inside a \"terraform\" block.", - Subject: block.TypeRange.Ptr(), - }) - - case "provider": - cfg, cfgDiags := decodeProviderBlock(block) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.ProviderConfigs = append(file.ProviderConfigs, cfg) - } - - case "variable": - cfg, cfgDiags := decodeVariableBlock(block, override) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.Variables = append(file.Variables, cfg) - } - - case "locals": - defs, defsDiags := decodeLocalsBlock(block) - diags = append(diags, defsDiags...) - file.Locals = append(file.Locals, defs...) - - case "output": - cfg, cfgDiags := decodeOutputBlock(block, override) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.Outputs = append(file.Outputs, cfg) - } - - case "module": - cfg, cfgDiags := decodeModuleBlock(block, override) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.ModuleCalls = append(file.ModuleCalls, cfg) - } - - case "resource": - cfg, cfgDiags := decodeResourceBlock(block) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.ManagedResources = append(file.ManagedResources, cfg) - } - - case "data": - cfg, cfgDiags := decodeDataBlock(block) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.DataResources = append(file.DataResources, cfg) - } - - default: - // Should never happen because the above cases should be exhaustive - // for all block type names in our schema. - continue - - } - } - - return file, diags -} - -// sniffCoreVersionRequirements does minimal parsing of the given body for -// "terraform" blocks with "required_version" attributes, returning the -// requirements found. 
-// -// This is intended to maximize the chance that we'll be able to read the -// requirements (syntax errors notwithstanding) even if the config file contains -// constructs that might've been added in future Terraform versions -// -// This is a "best effort" sort of method which will return constraints it is -// able to find, but may return no constraints at all if the given body is -// so invalid that it cannot be decoded at all. -func sniffCoreVersionRequirements(body hcl.Body) ([]VersionConstraint, hcl.Diagnostics) { - rootContent, _, diags := body.PartialContent(configFileTerraformBlockSniffRootSchema) - - var constraints []VersionConstraint - - for _, block := range rootContent.Blocks { - content, _, blockDiags := block.Body.PartialContent(configFileVersionSniffBlockSchema) - diags = append(diags, blockDiags...) - - attr, exists := content.Attributes["required_version"] - if !exists { - continue - } - - constraint, constraintDiags := decodeVersionConstraint(attr) - diags = append(diags, constraintDiags...) - if !constraintDiags.HasErrors() { - constraints = append(constraints, constraint) - } - } - - return constraints, diags -} - -// configFileSchema is the schema for the top-level of a config file. We use -// the low-level HCL API for this level so we can easily deal with each -// block type separately with its own decoding logic. -var configFileSchema = &hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "terraform", - }, - { - // This one is not really valid, but we include it here so we - // can create a specialized error message hinting the user to - // nest it inside a "terraform" block. - Type: "required_providers", - }, - { - Type: "provider", - LabelNames: []string{"name"}, - }, - { - Type: "variable", - LabelNames: []string{"name"}, - }, - { - Type: "locals", - }, - { - Type: "output", - LabelNames: []string{"name"}, - }, - { - Type: "module", - LabelNames: []string{"name"}, - }, - { - Type: "resource", - LabelNames: []string{"type", "name"}, - }, - { - Type: "data", - LabelNames: []string{"type", "name"}, - }, - }, -} - -// terraformBlockSchema is the schema for a top-level "terraform" block in -// a configuration file. -var terraformBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - {Name: "required_version"}, - {Name: "experiments"}, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "backend", - LabelNames: []string{"type"}, - }, - { - Type: "required_providers", - }, - { - Type: "provider_meta", - LabelNames: []string{"provider"}, - }, - }, -} - -// configFileTerraformBlockSniffRootSchema is a schema for -// sniffCoreVersionRequirements and sniffActiveExperiments. -var configFileTerraformBlockSniffRootSchema = &hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "terraform", - }, - }, -} - -// configFileVersionSniffBlockSchema is a schema for sniffCoreVersionRequirements -var configFileVersionSniffBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "required_version", - }, - }, -} - -// configFileExperimentsSniffBlockSchema is a schema for sniffActiveExperiments, -// to decode a single attribute from inside a "terraform" block. 
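(Editor's sketch: what the version sniff yields for a typical file, assuming the surrounding configs package.)

    f, _ := hclsyntax.ParseConfig([]byte(`terraform { required_version = ">= 0.12" }`), "main.tf", hcl.InitialPos)
    reqs, _ := sniffCoreVersionRequirements(f.Body)
    // len(reqs) == 1 and reqs[0].Required captures ">= 0.12", even if the
    // rest of the file used constructs this parser does not understand.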
-var configFileExperimentsSniffBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "experiments", - }, - }, -} diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go b/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go deleted file mode 100644 index 2923af93..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go +++ /dev/null @@ -1,163 +0,0 @@ -package configs - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/hashicorp/hcl/v2" -) - -// LoadConfigDir reads the .tf and .tf.json files in the given directory -// as config files (using LoadConfigFile) and then combines these files into -// a single Module. -// -// If this method returns nil, that indicates that the given directory does not -// exist at all or could not be opened for some reason. Callers may wish to -// detect this case and ignore the returned diagnostics so that they can -// produce a more context-aware error message in that case. -// -// If this method returns a non-nil module while error diagnostics are returned -// then the module may be incomplete but can be used carefully for static -// analysis. -// -// This file does not consider a directory with no files to be an error, and -// will simply return an empty module in that case. Callers should first call -// Parser.IsConfigDir if they wish to recognize that situation. -// -// .tf files are parsed using the HCL native syntax while .tf.json files are -// parsed using the HCL JSON syntax. -func (p *Parser) LoadConfigDir(path string) (*Module, hcl.Diagnostics) { - primaryPaths, overridePaths, diags := p.dirFiles(path) - if diags.HasErrors() { - return nil, diags - } - - primary, fDiags := p.loadFiles(primaryPaths, false) - diags = append(diags, fDiags...) - override, fDiags := p.loadFiles(overridePaths, true) - diags = append(diags, fDiags...) - - mod, modDiags := NewModule(primary, override) - diags = append(diags, modDiags...) - - mod.SourceDir = path - - return mod, diags -} - -// ConfigDirFiles returns lists of the primary and override files configuration -// files in the given directory. -// -// If the given directory does not exist or cannot be read, error diagnostics -// are returned. If errors are returned, the resulting lists may be incomplete. -func (p Parser) ConfigDirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) { - return p.dirFiles(dir) -} - -// IsConfigDir determines whether the given path refers to a directory that -// exists and contains at least one Terraform config file (with a .tf or -// .tf.json extension.) -func (p *Parser) IsConfigDir(path string) bool { - primaryPaths, overridePaths, _ := p.dirFiles(path) - return (len(primaryPaths) + len(overridePaths)) > 0 -} - -func (p *Parser) loadFiles(paths []string, override bool) ([]*File, hcl.Diagnostics) { - var files []*File - var diags hcl.Diagnostics - - for _, path := range paths { - var f *File - var fDiags hcl.Diagnostics - if override { - f, fDiags = p.LoadConfigFileOverride(path) - } else { - f, fDiags = p.LoadConfigFile(path) - } - diags = append(diags, fDiags...) 
- if f != nil { - files = append(files, f) - } - } - - return files, diags -} - -func (p *Parser) dirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) { - infos, err := p.fs.ReadDir(dir) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Failed to read module directory", - Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir), - }) - return - } - - for _, info := range infos { - if info.IsDir() { - // We only care about files - continue - } - - name := info.Name() - ext := fileExt(name) - if ext == "" || IsIgnoredFile(name) { - continue - } - - baseName := name[:len(name)-len(ext)] // strip extension - isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override") - - fullPath := filepath.Join(dir, name) - if isOverride { - override = append(override, fullPath) - } else { - primary = append(primary, fullPath) - } - } - - return -} - -// fileExt returns the Terraform configuration extension of the given -// path, or a blank string if it is not a recognized extension. -func fileExt(path string) string { - if strings.HasSuffix(path, ".tf") { - return ".tf" - } else if strings.HasSuffix(path, ".tf.json") { - return ".tf.json" - } else { - return "" - } -} - -// IsIgnoredFile returns true if the given filename (which must not have a -// directory path ahead of it) should be ignored as e.g. an editor swap file. -func IsIgnoredFile(name string) bool { - return strings.HasPrefix(name, ".") || // Unix-like hidden files - strings.HasSuffix(name, "~") || // vim - strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs -} - -// IsEmptyDir returns true if the given filesystem path contains no Terraform -// configuration files. -// -// Unlike the methods of the Parser type, this function always consults the -// real filesystem, and thus it isn't appropriate to use when working with -// configuration loaded from a plan file. -func IsEmptyDir(path string) (bool, error) { - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - return true, nil - } - - p := NewParser(nil) - fs, os, diags := p.dirFiles(path) - if diags.HasErrors() { - return false, diags - } - - return len(fs) == 0 && len(os) == 0, nil -} diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_values.go b/vendor/github.com/hashicorp/terraform/configs/parser_values.go deleted file mode 100644 index 10d98e5b..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/parser_values.go +++ /dev/null @@ -1,43 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// LoadValuesFile reads the file at the given path and parses it as a "values -// file", which is an HCL config file whose top-level attributes are treated -// as arbitrary key.value pairs. -// -// If the file cannot be read -- for example, if it does not exist -- then -// a nil map will be returned along with error diagnostics. Callers may wish -// to disregard the returned diagnostics in this case and instead generate -// their own error message(s) with additional context. -// -// If the returned diagnostics has errors when a non-nil map is returned -// then the map may be incomplete but should be valid enough for careful -// static analysis. -// -// This method wraps LoadHCLFile, and so it inherits the syntax selection -// behaviors documented for that method. 
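(Editor's example: a hypothetical values file and the map LoadValuesFile would produce for it, given a *Parser p.)

    // terraform.tfvars:
    //   region = "us-west-2"
    //   count  = 2
    vals, diags := p.LoadValuesFile("terraform.tfvars")
    // vals["region"] == cty.StringVal("us-west-2")
    // vals["count"]  == cty.NumberIntVal(2)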
-func (p *Parser) LoadValuesFile(path string) (map[string]cty.Value, hcl.Diagnostics) { - body, diags := p.LoadHCLFile(path) - if body == nil { - return nil, diags - } - - vals := make(map[string]cty.Value) - attrs, attrDiags := body.JustAttributes() - diags = append(diags, attrDiags...) - if attrs == nil { - return vals, diags - } - - for name, attr := range attrs { - val, valDiags := attr.Expr.Value(nil) - diags = append(diags, valDiags...) - vals[name] = val - } - - return vals, diags -} diff --git a/vendor/github.com/hashicorp/terraform/configs/provider.go b/vendor/github.com/hashicorp/terraform/configs/provider.go deleted file mode 100644 index 3213e685..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/provider.go +++ /dev/null @@ -1,244 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/tfdiags" -) - -// Provider represents a "provider" block in a module or file. A provider -// block is a provider configuration, and there can be zero or more -// configurations for each actual provider. -type Provider struct { - Name string - NameRange hcl.Range - Alias string - AliasRange *hcl.Range // nil if no alias set - - Version VersionConstraint - - Config hcl.Body - - DeclRange hcl.Range -} - -func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) { - var diags hcl.Diagnostics - - // Produce deprecation messages for any pre-0.12-style - // single-interpolation-only expressions. We do this up front here because - // then we can also catch instances inside special blocks like "connection", - // before PartialContent extracts them. - moreDiags := warnForDeprecatedInterpolationsInBody(block.Body) - diags = append(diags, moreDiags...) - - content, config, moreDiags := block.Body.PartialContent(providerBlockSchema) - diags = append(diags, moreDiags...) - - // Provider names must be localized. Produce an error with a message - // indicating the action the user can take to fix this message if the local - // name is not localized. - name := block.Labels[0] - nameDiags := checkProviderNameNormalized(name, block.DefRange) - diags = append(diags, nameDiags...) - - provider := &Provider{ - Name: name, - NameRange: block.LabelRanges[0], - Config: config, - DeclRange: block.DefRange, - } - - if attr, exists := content.Attributes["alias"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias) - diags = append(diags, valDiags...) - provider.AliasRange = attr.Expr.Range().Ptr() - - if !hclsyntax.ValidIdentifier(provider.Alias) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration alias", - Detail: fmt.Sprintf("An alias must be a valid name. %s", badIdentifierDetail), - }) - } - } - - if attr, exists := content.Attributes["version"]; exists { - var versionDiags hcl.Diagnostics - provider.Version, versionDiags = decodeVersionConstraint(attr) - diags = append(diags, versionDiags...) 
- } - - // Reserved attribute names - for _, name := range []string{"count", "depends_on", "for_each", "source"} { - if attr, exists := content.Attributes[name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved argument name in provider block", - Detail: fmt.Sprintf("The provider argument name %q is reserved for use by Terraform in a future version.", name), - Subject: &attr.NameRange, - }) - } - } - - // Reserved block types (all of them) - for _, block := range content.Blocks { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in provider block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - - return provider, diags -} - -// Addr returns the address of the receiving provider configuration, relative -// to its containing module. -func (p *Provider) Addr() addrs.LocalProviderConfig { - return addrs.LocalProviderConfig{ - LocalName: p.Name, - Alias: p.Alias, - } -} - -func (p *Provider) moduleUniqueKey() string { - if p.Alias != "" { - return fmt.Sprintf("%s.%s", p.Name, p.Alias) - } - return p.Name -} - -// ParseProviderConfigCompact parses the given absolute traversal as a relative -// provider address in compact form. The following are examples of traversals -// that can be successfully parsed as compact relative provider configuration -// addresses: -// -// aws -// aws.foo -// -// This function will panic if given a relative traversal. -// -// If the returned diagnostics contains errors then the result value is invalid -// and must not be used. -func ParseProviderConfigCompact(traversal hcl.Traversal) (addrs.LocalProviderConfig, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret := addrs.LocalProviderConfig{ - LocalName: traversal.RootName(), - } - - if len(traversal) < 2 { - // Just a type name, then. - return ret, diags - } - - aliasStep := traversal[1] - switch ts := aliasStep.(type) { - case hcl.TraverseAttr: - ret.Alias = ts.Name - return ret, diags - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "The provider type name must either stand alone or be followed by an alias name separated with a dot.", - Subject: aliasStep.SourceRange().Ptr(), - }) - } - - if len(traversal) > 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Extraneous extra operators after provider configuration address.", - Subject: traversal[2:].SourceRange().Ptr(), - }) - } - - return ret, diags -} - -// ParseProviderConfigCompactStr is a helper wrapper around ParseProviderConfigCompact -// that takes a string and parses it with the HCL native syntax traversal parser -// before interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseProviderConfigCompact. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. 
If error diagnostics are returned -// then the returned address is invalid. -func ParseProviderConfigCompactStr(str string) (addrs.LocalProviderConfig, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return addrs.LocalProviderConfig{}, diags - } - - addr, addrDiags := ParseProviderConfigCompact(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} - -var providerBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "alias", - }, - { - Name: "version", - }, - - // Attribute names reserved for future expansion. - {Name: "count"}, - {Name: "depends_on"}, - {Name: "for_each"}, - {Name: "source"}, - }, - Blocks: []hcl.BlockHeaderSchema{ - // _All_ of these are reserved for future expansion. - {Type: "lifecycle"}, - {Type: "locals"}, - }, -} - -// checkProviderNameNormalized verifies that the given string is already -// normalized and returns an error if not. -func checkProviderNameNormalized(name string, declrange hcl.Range) hcl.Diagnostics { - var diags hcl.Diagnostics - // verify that the provider local name is normalized - normalized, err := addrs.IsProviderPartNormalized(name) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider local name", - Detail: fmt.Sprintf("%s is an invalid provider local name: %s", name, err), - Subject: &declrange, - }) - return diags - } - if !normalized { - // we would have returned this error already - normalizedProvider, _ := addrs.ParseProviderPart(name) - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider local name", - Detail: fmt.Sprintf("Provider names must be normalized. Replace %q with %q to fix this error.", name, normalizedProvider), - Subject: &declrange, - }) - } - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/configs/provider_meta.go b/vendor/github.com/hashicorp/terraform/configs/provider_meta.go deleted file mode 100644 index e49614f6..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/provider_meta.go +++ /dev/null @@ -1,25 +0,0 @@ -package configs - -import "github.com/hashicorp/hcl/v2" - -// ProviderMeta represents a "provider_meta" block inside a "terraform" block -// in a module or file. -type ProviderMeta struct { - Provider string - Config hcl.Body - - ProviderRange hcl.Range - DeclRange hcl.Range -} - -func decodeProviderMetaBlock(block *hcl.Block) (*ProviderMeta, hcl.Diagnostics) { - // verify that the local name is already localized or produce an error. - diags := checkProviderNameNormalized(block.Labels[0], block.DefRange) - - return &ProviderMeta{ - Provider: block.Labels[0], - ProviderRange: block.LabelRanges[0], - Config: block.Body, - DeclRange: block.DefRange, - }, diags -} diff --git a/vendor/github.com/hashicorp/terraform/configs/provider_requirements.go b/vendor/github.com/hashicorp/terraform/configs/provider_requirements.go deleted file mode 100644 index 361fec5e..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/provider_requirements.go +++ /dev/null @@ -1,124 +0,0 @@ -package configs - -import ( - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" -) - -// RequiredProvider represents a declaration of a dependency on a particular -// provider version or source without actually configuring that provider. 
This -// is used in child modules that expect a provider to be passed in from their -// parent. -type RequiredProvider struct { - Name string - Source string - Type addrs.Provider - Requirement VersionConstraint - DeclRange hcl.Range -} - -type RequiredProviders struct { - RequiredProviders map[string]*RequiredProvider - DeclRange hcl.Range -} - -func decodeRequiredProvidersBlock(block *hcl.Block) (*RequiredProviders, hcl.Diagnostics) { - attrs, diags := block.Body.JustAttributes() - ret := &RequiredProviders{ - RequiredProviders: make(map[string]*RequiredProvider), - DeclRange: block.DefRange, - } - for name, attr := range attrs { - expr, err := attr.Expr.Value(nil) - if err != nil { - diags = append(diags, err...) - } - - // verify that the local name is already localized or produce an error. - nameDiags := checkProviderNameNormalized(name, attr.Expr.Range()) - diags = append(diags, nameDiags...) - - rp := &RequiredProvider{ - Name: name, - DeclRange: attr.Expr.Range(), - } - - switch { - case expr.Type().IsPrimitiveType(): - vc, reqDiags := decodeVersionConstraint(attr) - diags = append(diags, reqDiags...) - rp.Requirement = vc - - case expr.Type().IsObjectType(): - if expr.Type().HasAttribute("version") { - vc := VersionConstraint{ - DeclRange: attr.Range, - } - constraintStr := expr.GetAttr("version").AsString() - constraints, err := version.NewConstraint(constraintStr) - if err != nil { - // NewConstraint doesn't return user-friendly errors, so we'll just - // ignore the provided error and produce our own generic one. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid version constraint", - Detail: "This string does not use correct version constraint syntax.", - Subject: attr.Expr.Range().Ptr(), - }) - } else { - vc.Required = constraints - rp.Requirement = vc - } - } - if expr.Type().HasAttribute("source") { - rp.Source = expr.GetAttr("source").AsString() - - fqn, sourceDiags := addrs.ParseProviderSourceString(rp.Source) - - if sourceDiags.HasErrors() { - hclDiags := sourceDiags.ToHCL() - // The diagnostics from ParseProviderSourceString don't contain - // source location information because it has no context to compute - // them from, and so we'll add those in quickly here before we - // return. - for _, diag := range hclDiags { - if diag.Subject == nil { - diag.Subject = attr.Expr.Range().Ptr() - } - } - diags = append(diags, hclDiags...) 
- } else { - rp.Type = fqn - } - } - - default: - // should not happen - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid required_providers syntax", - Detail: "required_providers entries must be strings or objects.", - Subject: attr.Expr.Range().Ptr(), - }) - } - - if rp.Type.IsZero() && !diags.HasErrors() { // Don't try to generate an FQN if we've encountered errors - pType, err := addrs.ParseProviderPart(rp.Name) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider name", - Detail: err.Error(), - Subject: attr.Expr.Range().Ptr(), - }) - } else { - rp.Type = addrs.ImpliedProviderForUnqualifiedType(pType) - } - } - - ret.RequiredProviders[rp.Name] = rp - } - - return ret, diags -} diff --git a/vendor/github.com/hashicorp/terraform/configs/provisioner.go b/vendor/github.com/hashicorp/terraform/configs/provisioner.go deleted file mode 100644 index 76938251..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/provisioner.go +++ /dev/null @@ -1,204 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" -) - -// Provisioner represents a "provisioner" block when used within a -// "resource" block in a module or file. -type Provisioner struct { - Type string - Config hcl.Body - Connection *Connection - When ProvisionerWhen - OnFailure ProvisionerOnFailure - - DeclRange hcl.Range - TypeRange hcl.Range -} - -func decodeProvisionerBlock(block *hcl.Block) (*Provisioner, hcl.Diagnostics) { - pv := &Provisioner{ - Type: block.Labels[0], - TypeRange: block.LabelRanges[0], - DeclRange: block.DefRange, - When: ProvisionerWhenCreate, - OnFailure: ProvisionerOnFailureFail, - } - - content, config, diags := block.Body.PartialContent(provisionerBlockSchema) - pv.Config = config - - if attr, exists := content.Attributes["when"]; exists { - expr, shimDiags := shimTraversalInString(attr.Expr, true) - diags = append(diags, shimDiags...) - - switch hcl.ExprAsKeyword(expr) { - case "create": - pv.When = ProvisionerWhenCreate - case "destroy": - pv.When = ProvisionerWhenDestroy - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid \"when\" keyword", - Detail: "The \"when\" argument requires one of the following keywords: create or destroy.", - Subject: expr.Range().Ptr(), - }) - } - } - - // destroy provisioners can only refer to self - if pv.When == ProvisionerWhenDestroy { - diags = append(diags, onlySelfRefs(config)...) - } - - if attr, exists := content.Attributes["on_failure"]; exists { - expr, shimDiags := shimTraversalInString(attr.Expr, true) - diags = append(diags, shimDiags...) 
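(Editor's aside: a hypothetical provisioner block exercising the two keyword meta-arguments decoded here.)

    provisioner "local-exec" {
      command    = "echo goodbye"
      when       = destroy
      on_failure = continue
    }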
- - switch hcl.ExprAsKeyword(expr) { - case "continue": - pv.OnFailure = ProvisionerOnFailureContinue - case "fail": - pv.OnFailure = ProvisionerOnFailureFail - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid \"on_failure\" keyword", - Detail: "The \"on_failure\" argument requires one of the following keywords: continue or fail.", - Subject: attr.Expr.Range().Ptr(), - }) - } - } - - var seenConnection *hcl.Block - for _, block := range content.Blocks { - switch block.Type { - - case "connection": - if seenConnection != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate connection block", - Detail: fmt.Sprintf("This provisioner already has a connection block at %s.", seenConnection.DefRange), - Subject: &block.DefRange, - }) - continue - } - seenConnection = block - - // destroy provisioners can only refer to self - if pv.When == ProvisionerWhenDestroy { - diags = append(diags, onlySelfRefs(block.Body)...) - } - - pv.Connection = &Connection{ - Config: block.Body, - DeclRange: block.DefRange, - } - - default: - // Any other block types are ones we've reserved for future use, - // so they get a generic message. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in provisioner block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - } - - return pv, diags -} - -func onlySelfRefs(body hcl.Body) hcl.Diagnostics { - var diags hcl.Diagnostics - - // Provisioners currently do not use any blocks in their configuration. - // Blocks are likely to remain solely for meta parameters, but in the case - // that blocks are supported for provisioners, we will want to extend this - // to find variables in nested blocks. - attrs, _ := body.JustAttributes() - for _, attr := range attrs { - for _, v := range attr.Expr.Variables() { - valid := false - switch v.RootName() { - case "self", "path", "terraform": - valid = true - case "count": - // count must use "index" - if len(v) == 2 { - if t, ok := v[1].(hcl.TraverseAttr); ok && t.Name == "index" { - valid = true - } - } - - case "each": - if len(v) == 2 { - if t, ok := v[1].(hcl.TraverseAttr); ok && t.Name == "key" { - valid = true - } - } - } - - if !valid { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference from destroy provisioner", - Detail: "Destroy-time provisioners and their connection configurations may only " + - "reference attributes of the related resource, via 'self', 'count.index', " + - "or 'each.key'.\n\nReferences to other resources during the destroy phase " + - "can cause dependency cycles and interact poorly with create_before_destroy.", - Subject: attr.Expr.Range().Ptr(), - }) - } - } - } - return diags -} - -// Connection represents a "connection" block when used within either a -// "resource" or "provisioner" block in a module or file. -type Connection struct { - Config hcl.Body - - DeclRange hcl.Range -} - -// ProvisionerWhen is an enum for valid values for when to run provisioners. -type ProvisionerWhen int - -//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerWhen - -const ( - ProvisionerWhenInvalid ProvisionerWhen = iota - ProvisionerWhenCreate - ProvisionerWhenDestroy -) - -// ProvisionerOnFailure is an enum for valid values for on_failure options -// for provisioners. 
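(Editor's example: how onlySelfRefs, defined above, classifies references found in a destroy-time provisioner or its connection block.)

    self.private_ip      // allowed
    count.index          // allowed (only the "index" attribute of count)
    each.key             // allowed (only the "key" attribute of each)
    path.module          // allowed ("path" is an accepted root name)
    aws_instance.web.id  // rejected: "Invalid reference from destroy provisioner"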
-type ProvisionerOnFailure int - -//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerOnFailure - -const ( - ProvisionerOnFailureInvalid ProvisionerOnFailure = iota - ProvisionerOnFailureContinue - ProvisionerOnFailureFail -) - -var provisionerBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - {Name: "when"}, - {Name: "on_failure"}, - }, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "connection"}, - {Type: "lifecycle"}, // reserved for future use - }, -} diff --git a/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go b/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go deleted file mode 100644 index 7ff5a6e0..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type ProvisionerOnFailure"; DO NOT EDIT. - -package configs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[ProvisionerOnFailureInvalid-0] - _ = x[ProvisionerOnFailureContinue-1] - _ = x[ProvisionerOnFailureFail-2] -} - -const _ProvisionerOnFailure_name = "ProvisionerOnFailureInvalidProvisionerOnFailureContinueProvisionerOnFailureFail" - -var _ProvisionerOnFailure_index = [...]uint8{0, 27, 55, 79} - -func (i ProvisionerOnFailure) String() string { - if i < 0 || i >= ProvisionerOnFailure(len(_ProvisionerOnFailure_index)-1) { - return "ProvisionerOnFailure(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _ProvisionerOnFailure_name[_ProvisionerOnFailure_index[i]:_ProvisionerOnFailure_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go b/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go deleted file mode 100644 index 9f21b3ac..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type ProvisionerWhen"; DO NOT EDIT. - -package configs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[ProvisionerWhenInvalid-0] - _ = x[ProvisionerWhenCreate-1] - _ = x[ProvisionerWhenDestroy-2] -} - -const _ProvisionerWhen_name = "ProvisionerWhenInvalidProvisionerWhenCreateProvisionerWhenDestroy" - -var _ProvisionerWhen_index = [...]uint8{0, 22, 43, 65} - -func (i ProvisionerWhen) String() string { - if i < 0 || i >= ProvisionerWhen(len(_ProvisionerWhen_index)-1) { - return "ProvisionerWhen(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _ProvisionerWhen_name[_ProvisionerWhen_index[i]:_ProvisionerWhen_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/configs/resource.go b/vendor/github.com/hashicorp/terraform/configs/resource.go deleted file mode 100644 index 72adf66f..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/resource.go +++ /dev/null @@ -1,517 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform/addrs" -) - -// Resource represents a "resource" or "data" block in a module or file. 
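(Editor's example: behavior of the generated String methods above.)

    ProvisionerWhenCreate.String()    // "ProvisionerWhenCreate"
    ProvisionerOnFailureFail.String() // "ProvisionerOnFailureFail"
    ProvisionerWhen(7).String()       // "ProvisionerWhen(7)": out-of-range fallback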
-type Resource struct { - Mode addrs.ResourceMode - Name string - Type string - Config hcl.Body - Count hcl.Expression - ForEach hcl.Expression - - ProviderConfigRef *ProviderConfigRef - Provider addrs.Provider - - DependsOn []hcl.Traversal - - // Managed is populated only for Mode = addrs.ManagedResourceMode, - // containing the additional fields that apply to managed resources. - // For all other resource modes, this field is nil. - Managed *ManagedResource - - DeclRange hcl.Range - TypeRange hcl.Range -} - -// ManagedResource represents a "resource" block in a module or file. -type ManagedResource struct { - Connection *Connection - Provisioners []*Provisioner - - CreateBeforeDestroy bool - PreventDestroy bool - IgnoreChanges []hcl.Traversal - IgnoreAllChanges bool - - CreateBeforeDestroySet bool - PreventDestroySet bool -} - -func (r *Resource) moduleUniqueKey() string { - return r.Addr().String() -} - -// Addr returns a resource address for the receiver that is relative to the -// resource's containing module. -func (r *Resource) Addr() addrs.Resource { - return addrs.Resource{ - Mode: r.Mode, - Type: r.Type, - Name: r.Name, - } -} - -// ProviderConfigAddr returns the address for the provider configuration that -// should be used for this resource. This function returns a default provider -// config addr if an explicit "provider" argument was not provided. -func (r *Resource) ProviderConfigAddr() addrs.LocalProviderConfig { - if r.ProviderConfigRef == nil { - return addrs.LocalProviderConfig{ - LocalName: r.Provider.Type, - } - } - - return addrs.LocalProviderConfig{ - LocalName: r.ProviderConfigRef.Name, - Alias: r.ProviderConfigRef.Alias, - } -} - -func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { - var diags hcl.Diagnostics - r := &Resource{ - Mode: addrs.ManagedResourceMode, - Type: block.Labels[0], - Name: block.Labels[1], - DeclRange: block.DefRange, - TypeRange: block.LabelRanges[0], - Managed: &ManagedResource{}, - } - - // Produce deprecation messages for any pre-0.12-style - // single-interpolation-only expressions. We do this up front here because - // then we can also catch instances inside special blocks like "connection", - // before PartialContent extracts them. - moreDiags := warnForDeprecatedInterpolationsInBody(block.Body) - diags = append(diags, moreDiags...) - - content, remain, moreDiags := block.Body.PartialContent(resourceBlockSchema) - diags = append(diags, moreDiags...) 
- r.Config = remain - - if !hclsyntax.ValidIdentifier(r.Type) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource type name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - if !hclsyntax.ValidIdentifier(r.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[1], - }) - } - - if attr, exists := content.Attributes["count"]; exists { - r.Count = attr.Expr - } - - if attr, exists := content.Attributes["for_each"]; exists { - r.ForEach = attr.Expr - // Cannot have count and for_each on the same resource block - if r.Count != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid combination of "count" and "for_each"`, - Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, - Subject: &attr.NameRange, - }) - } - } - - if attr, exists := content.Attributes["provider"]; exists { - var providerDiags hcl.Diagnostics - r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") - diags = append(diags, providerDiags...) - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - r.DependsOn = append(r.DependsOn, deps...) - } - - var seenLifecycle *hcl.Block - var seenConnection *hcl.Block - for _, block := range content.Blocks { - switch block.Type { - case "lifecycle": - if seenLifecycle != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate lifecycle block", - Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange), - Subject: &block.DefRange, - }) - continue - } - seenLifecycle = block - - lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema) - diags = append(diags, lcDiags...) - - if attr, exists := lcContent.Attributes["create_before_destroy"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.CreateBeforeDestroy) - diags = append(diags, valDiags...) - r.Managed.CreateBeforeDestroySet = true - } - - if attr, exists := lcContent.Attributes["prevent_destroy"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.PreventDestroy) - diags = append(diags, valDiags...) - r.Managed.PreventDestroySet = true - } - - if attr, exists := lcContent.Attributes["ignore_changes"]; exists { - - // ignore_changes can either be a list of relative traversals - // or it can be just the keyword "all" to ignore changes to this - // resource entirely. - // ignore_changes = [ami, instance_type] - // ignore_changes = all - // We also allow two legacy forms for compatibility with earlier - // versions: - // ignore_changes = ["ami", "instance_type"] - // ignore_changes = ["*"] - - kw := hcl.ExprAsKeyword(attr.Expr) - - switch { - case kw == "all": - r.Managed.IgnoreAllChanges = true - default: - exprs, listDiags := hcl.ExprList(attr.Expr) - diags = append(diags, listDiags...) - - var ignoreAllRange hcl.Range - - for _, expr := range exprs { - - // our expr might be the literal string "*", which - // we accept as a deprecated way of saying "all". 
- if shimIsIgnoreChangesStar(expr) { - r.Managed.IgnoreAllChanges = true - ignoreAllRange = expr.Range() - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Deprecated ignore_changes wildcard", - Detail: "The [\"*\"] form of ignore_changes wildcard is deprecated. Use \"ignore_changes = all\" to ignore changes to all attributes.", - Subject: attr.Expr.Range().Ptr(), - }) - continue - } - - expr, shimDiags := shimTraversalInString(expr, false) - diags = append(diags, shimDiags...) - - traversal, travDiags := hcl.RelTraversalForExpr(expr) - diags = append(diags, travDiags...) - if len(traversal) != 0 { - r.Managed.IgnoreChanges = append(r.Managed.IgnoreChanges, traversal) - } - } - - if r.Managed.IgnoreAllChanges && len(r.Managed.IgnoreChanges) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid ignore_changes ruleset", - Detail: "Cannot mix wildcard string \"*\" with non-wildcard references.", - Subject: &ignoreAllRange, - Context: attr.Expr.Range().Ptr(), - }) - } - - } - - } - - case "connection": - if seenConnection != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate connection block", - Detail: fmt.Sprintf("This resource already has a connection block at %s.", seenConnection.DefRange), - Subject: &block.DefRange, - }) - continue - } - seenConnection = block - - r.Managed.Connection = &Connection{ - Config: block.Body, - DeclRange: block.DefRange, - } - - case "provisioner": - pv, pvDiags := decodeProvisionerBlock(block) - diags = append(diags, pvDiags...) - if pv != nil { - r.Managed.Provisioners = append(r.Managed.Provisioners, pv) - } - - default: - // Any other block types are ones we've reserved for future use, - // so they get a generic message. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in resource block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - } - - // Now we can validate the connection block references if there are any destroy provisioners. - // TODO: should we eliminate standalone connection blocks? - if r.Managed.Connection != nil { - for _, p := range r.Managed.Provisioners { - if p.When == ProvisionerWhenDestroy { - diags = append(diags, onlySelfRefs(r.Managed.Connection.Config)...) 
- break - } - } - } - - return r, diags -} - -func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { - r := &Resource{ - Mode: addrs.DataResourceMode, - Type: block.Labels[0], - Name: block.Labels[1], - DeclRange: block.DefRange, - TypeRange: block.LabelRanges[0], - } - - content, remain, diags := block.Body.PartialContent(dataBlockSchema) - r.Config = remain - - if !hclsyntax.ValidIdentifier(r.Type) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data source name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - if !hclsyntax.ValidIdentifier(r.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data resource name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[1], - }) - } - - if attr, exists := content.Attributes["count"]; exists { - r.Count = attr.Expr - } - - if attr, exists := content.Attributes["for_each"]; exists { - r.ForEach = attr.Expr - // Cannot have count and for_each on the same data block - if r.Count != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid combination of "count" and "for_each"`, - Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, - Subject: &attr.NameRange, - }) - } - } - - if attr, exists := content.Attributes["provider"]; exists { - var providerDiags hcl.Diagnostics - r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") - diags = append(diags, providerDiags...) - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - r.DependsOn = append(r.DependsOn, deps...) - } - - for _, block := range content.Blocks { - // All of the block types we accept are just reserved for future use, but some get a specialized error message. - switch block.Type { - case "lifecycle": - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported lifecycle block", - Detail: "Data resources do not have lifecycle settings, so a lifecycle block is not allowed.", - Subject: &block.DefRange, - }) - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in data block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - } - - return r, diags -} - -type ProviderConfigRef struct { - Name string - NameRange hcl.Range - Alias string - AliasRange *hcl.Range // nil if alias not set -} - -func decodeProviderConfigRef(expr hcl.Expression, argName string) (*ProviderConfigRef, hcl.Diagnostics) { - var diags hcl.Diagnostics - - var shimDiags hcl.Diagnostics - expr, shimDiags = shimTraversalInString(expr, false) - diags = append(diags, shimDiags...) - - traversal, travDiags := hcl.AbsTraversalForExpr(expr) - - // AbsTraversalForExpr produces only generic errors, so we'll discard - // the errors given and produce our own with extra context. If we didn't - // get any errors then we might still have warnings, though. - if !travDiags.HasErrors() { - diags = append(diags, travDiags...) 
- } - - if len(traversal) < 1 || len(traversal) > 2 { - // A provider reference was given as a string literal in the legacy - // configuration language and there are lots of examples out there - // showing that usage, so we'll sniff for that situation here and - // produce a specialized error message for it to help users find - // the new correct form. - if exprIsNativeQuotedString(expr) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration reference", - Detail: "A provider configuration reference must not be given in quotes.", - Subject: expr.Range().Ptr(), - }) - return nil, diags - } - - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration reference", - Detail: fmt.Sprintf("The %s argument requires a provider type name, optionally followed by a period and then a configuration alias.", argName), - Subject: expr.Range().Ptr(), - }) - return nil, diags - } - - // verify that the provider local name is normalized - name := traversal.RootName() - nameDiags := checkProviderNameNormalized(name, traversal[0].SourceRange()) - diags = append(diags, nameDiags...) - - ret := &ProviderConfigRef{ - Name: name, - NameRange: traversal[0].SourceRange(), - } - - if len(traversal) > 1 { - aliasStep, ok := traversal[1].(hcl.TraverseAttr) - if !ok { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration reference", - Detail: "Provider name must either stand alone or be followed by a period and then a configuration alias.", - Subject: traversal[1].SourceRange().Ptr(), - }) - return ret, diags - } - - ret.Alias = aliasStep.Name - ret.AliasRange = aliasStep.SourceRange().Ptr() - } - - return ret, diags -} - -// Addr returns the provider config address corresponding to the receiving -// config reference. -// -// This is a trivial conversion, essentially just discarding the source -// location information and keeping just the addressing information. 
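// For example (an illustrative sketch), a reference parsed from
// `aws.west` converts and prints as follows:
//
//	ref := &ProviderConfigRef{Name: "aws", Alias: "west"}
//	ref.Addr()   // addrs.LocalProviderConfig{LocalName: "aws", Alias: "west"}
//	ref.String() // "aws.west"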
-func (r *ProviderConfigRef) Addr() addrs.LocalProviderConfig { - return addrs.LocalProviderConfig{ - LocalName: r.Name, - Alias: r.Alias, - } -} - -func (r *ProviderConfigRef) String() string { - if r == nil { - return "" - } - if r.Alias != "" { - return fmt.Sprintf("%s.%s", r.Name, r.Alias) - } - return r.Name -} - -var commonResourceAttributes = []hcl.AttributeSchema{ - { - Name: "count", - }, - { - Name: "for_each", - }, - { - Name: "provider", - }, - { - Name: "depends_on", - }, -} - -var resourceBlockSchema = &hcl.BodySchema{ - Attributes: commonResourceAttributes, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "locals"}, // reserved for future use - {Type: "lifecycle"}, - {Type: "connection"}, - {Type: "provisioner", LabelNames: []string{"type"}}, - }, -} - -var dataBlockSchema = &hcl.BodySchema{ - Attributes: commonResourceAttributes, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "lifecycle"}, // reserved for future use - {Type: "locals"}, // reserved for future use - }, -} - -var resourceLifecycleBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "create_before_destroy", - }, - { - Name: "prevent_destroy", - }, - { - Name: "ignore_changes", - }, - }, -} diff --git a/vendor/github.com/hashicorp/terraform/configs/synth_body.go b/vendor/github.com/hashicorp/terraform/configs/synth_body.go deleted file mode 100644 index cd914e5d..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/synth_body.go +++ /dev/null @@ -1,118 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -// SynthBody produces a synthetic hcl.Body that behaves as if it had attributes -// corresponding to the elements given in the values map. -// -// This is useful in situations where, for example, values provided on the -// command line can override values given in configuration, using MergeBodies. -// -// The given filename is used in case any diagnostics are returned. Since -// the created body is synthetic, it is likely that this will not be a "real" -// filename. For example, if from a command line argument it could be -// a representation of that argument's name, such as "-var=...". 
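// A usage sketch (fileBody and the attribute values are illustrative
// assumptions; fileBody stands for an hcl.Body decoded from configuration
// on disk):
//
//	overrides := SynthBody("-var=...", map[string]cty.Value{
//		"region": cty.StringVal("us-west-2"),
//	})
//	merged := hcl.MergeBodies([]hcl.Body{fileBody, overrides})
//
// Decoding merged then sees the synthetic "region" attribute alongside the
// attributes defined in the file.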
-func SynthBody(filename string, values map[string]cty.Value) hcl.Body { - return synthBody{ - Filename: filename, - Values: values, - } -} - -type synthBody struct { - Filename string - Values map[string]cty.Value -} - -func (b synthBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - content, remain, diags := b.PartialContent(schema) - remainS := remain.(synthBody) - for name := range remainS.Values { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported attribute", - Detail: fmt.Sprintf("An attribute named %q is not expected here.", name), - Subject: b.synthRange().Ptr(), - }) - } - return content, diags -} - -func (b synthBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - var diags hcl.Diagnostics - content := &hcl.BodyContent{ - Attributes: make(hcl.Attributes), - MissingItemRange: b.synthRange(), - } - - remainValues := make(map[string]cty.Value) - for attrName, val := range b.Values { - remainValues[attrName] = val - } - - for _, attrS := range schema.Attributes { - delete(remainValues, attrS.Name) - val, defined := b.Values[attrS.Name] - if !defined { - if attrS.Required { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing required attribute", - Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), - Subject: b.synthRange().Ptr(), - }) - } - continue - } - content.Attributes[attrS.Name] = b.synthAttribute(attrS.Name, val) - } - - // We just ignore blocks altogether, because this body type never has - // nested blocks. - - remain := synthBody{ - Filename: b.Filename, - Values: remainValues, - } - - return content, remain, diags -} - -func (b synthBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - ret := make(hcl.Attributes) - for name, val := range b.Values { - ret[name] = b.synthAttribute(name, val) - } - return ret, nil -} - -func (b synthBody) MissingItemRange() hcl.Range { - return b.synthRange() -} - -func (b synthBody) synthAttribute(name string, val cty.Value) *hcl.Attribute { - rng := b.synthRange() - return &hcl.Attribute{ - Name: name, - Expr: &hclsyntax.LiteralValueExpr{ - Val: val, - SrcRange: rng, - }, - NameRange: rng, - Range: rng, - } -} - -func (b synthBody) synthRange() hcl.Range { - return hcl.Range{ - Filename: b.Filename, - Start: hcl.Pos{Line: 1, Column: 1}, - End: hcl.Pos{Line: 1, Column: 1}, - } -} diff --git a/vendor/github.com/hashicorp/terraform/configs/util.go b/vendor/github.com/hashicorp/terraform/configs/util.go deleted file mode 100644 index e135546f..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/util.go +++ /dev/null @@ -1,63 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// exprIsNativeQuotedString determines whether the given expression looks like -// it's a quoted string in the HCL native syntax. -// -// This should be used sparingly only for situations where our legacy HCL -// decoding would've expected a keyword or reference in quotes but our new -// decoding expects the keyword or reference to be provided directly as -// an identifier-based expression. -func exprIsNativeQuotedString(expr hcl.Expression) bool { - _, ok := expr.(*hclsyntax.TemplateExpr) - return ok -} - -// schemaForOverrides takes a *hcl.BodySchema and produces a new one that is -// equivalent except that any required attributes are forced to not be required. 
-// -// This is useful for dealing with "override" config files, which are allowed -// to omit things that they don't wish to override from the main configuration. -// -// The returned schema may have some pointers in common with the given schema, -// so neither the given schema nor the returned schema should be modified after -// using this function in order to avoid confusion. -// -// Overrides are rarely used, so it's recommended to just create the override -// schema on the fly only when it's needed, rather than storing it in a global -// variable as we tend to do for a primary schema. -func schemaForOverrides(schema *hcl.BodySchema) *hcl.BodySchema { - ret := &hcl.BodySchema{ - Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), - Blocks: schema.Blocks, - } - - for i, attrS := range schema.Attributes { - ret.Attributes[i] = attrS - ret.Attributes[i].Required = false - } - - return ret -} - -// schemaWithDynamic takes a *hcl.BodySchema and produces a new one that -// is equivalent except that it accepts an additional block type "dynamic" with -// a single label, used to recognize usage of the HCL dynamic block extension. -func schemaWithDynamic(schema *hcl.BodySchema) *hcl.BodySchema { - ret := &hcl.BodySchema{ - Attributes: schema.Attributes, - Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), - } - - copy(ret.Blocks, schema.Blocks) - ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ - Type: "dynamic", - LabelNames: []string{"type"}, - }) - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go b/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go deleted file mode 100644 index c02ad4b5..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go +++ /dev/null @@ -1,45 +0,0 @@ -package configs - -// VariableTypeHint is an enumeration used for the Variable.TypeHint field, -// which is an incompletely-specified type for the variable which is used -// as a hint for whether a value provided in an ambiguous context (on the -// command line or in an environment variable) should be taken literally as a -// string or parsed as an HCL expression to produce a data structure. -// -// The type hint is applied to runtime values as well, but since it does not -// accurately describe a precise type it is not fully-sufficient to infer -// the dynamic type of a value passed through a variable. -// -// These hints use inaccurate terminology for historical reasons. Full details -// are in the documentation for each constant in this enumeration, but in -// summary: -// -// TypeHintString requires a primitive type -// TypeHintList requires a type that could be converted to a tuple -// TypeHintMap requires a type that could be converted to an object -type VariableTypeHint rune - -//go:generate go run golang.org/x/tools/cmd/stringer -type VariableTypeHint - -// TypeHintNone indicates the absence of a type hint. Values specified in -// ambiguous contexts will be treated as literal strings, as if TypeHintString -// were selected, but no runtime value checks will be applied. This is a reasonable -// type hint for a module that is never intended to be used at the top-level -// of a configuration, since descendant modules never receive values from -// ambiguous contexts.
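// A sketch of how a caller might act on a hint when handling an ambiguous
// command-line value, assuming the hcl/v2, hclsyntax, and cty packages used
// elsewhere in this module; the helper name parseAmbiguousValue is
// hypothetical and for illustration only.
func parseAmbiguousValue(raw string, hint VariableTypeHint) (cty.Value, error) {
	switch hint {
	case TypeHintList, TypeHintMap:
		// Parse the raw string as an HCL expression, e.g. `["a", "b"]`.
		expr, diags := hclsyntax.ParseExpression([]byte(raw), "<arg>", hcl.InitialPos)
		if diags.HasErrors() {
			return cty.NilVal, diags
		}
		val, valDiags := expr.Value(nil)
		if valDiags.HasErrors() {
			return cty.NilVal, valDiags
		}
		return val, nil
	default:
		// TypeHintNone and TypeHintString take the value literally.
		return cty.StringVal(raw), nil
	}
}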
-const TypeHintNone VariableTypeHint = 0 - -// TypeHintString indicates that a value provided in an ambiguous context -// should be treated as a literal string, and additionally requires that the -// runtime value for the variable is of a primitive type (string, number, bool). -const TypeHintString VariableTypeHint = 'S' - -// TypeHintList indicates that a value provided in an ambiguous context should -// be treated as an HCL expression, and additionally requires that the -// runtime value for the variable is of a tuple, list, or set type. -const TypeHintList VariableTypeHint = 'L' - -// TypeHintMap indicates that a value provided in an ambiguous context should -// be treated as an HCL expression, and additionally requires that the -// runtime value for the variable is of an object or map type. -const TypeHintMap VariableTypeHint = 'M' diff --git a/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go b/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go deleted file mode 100644 index 2b50428c..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go +++ /dev/null @@ -1,39 +0,0 @@ -// Code generated by "stringer -type VariableTypeHint"; DO NOT EDIT. - -package configs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[TypeHintNone-0] - _ = x[TypeHintString-83] - _ = x[TypeHintList-76] - _ = x[TypeHintMap-77] -} - -const ( - _VariableTypeHint_name_0 = "TypeHintNone" - _VariableTypeHint_name_1 = "TypeHintListTypeHintMap" - _VariableTypeHint_name_2 = "TypeHintString" -) - -var ( - _VariableTypeHint_index_1 = [...]uint8{0, 12, 23} -) - -func (i VariableTypeHint) String() string { - switch { - case i == 0: - return _VariableTypeHint_name_0 - case 76 <= i && i <= 77: - i -= 76 - return _VariableTypeHint_name_1[_VariableTypeHint_index_1[i]:_VariableTypeHint_index_1[i+1]] - case i == 83: - return _VariableTypeHint_name_2 - default: - return "VariableTypeHint(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform/configs/version_constraint.go b/vendor/github.com/hashicorp/terraform/configs/version_constraint.go deleted file mode 100644 index 0f541dc7..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/version_constraint.go +++ /dev/null @@ -1,71 +0,0 @@ -package configs - -import ( - "fmt" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// VersionConstraint represents a version constraint on some resource -// (e.g. Terraform Core, a provider, a module, ...) that carries with it -// a source range so that a helpful diagnostic can be printed in the event -// that a particular constraint does not match.
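// A minimal sketch of the go-version API that the decoder below relies on;
// the constraint and version strings are illustrative only.
func exampleConstraintCheck() bool {
	constraints, _ := version.NewConstraint(">= 1.2.0, < 2.0.0")
	v, _ := version.NewVersion("1.7.3")
	return constraints.Check(v) // true: 1.7.3 satisfies both bounds
}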
-type VersionConstraint struct { - Required version.Constraints - DeclRange hcl.Range -} - -func decodeVersionConstraint(attr *hcl.Attribute) (VersionConstraint, hcl.Diagnostics) { - ret := VersionConstraint{ - DeclRange: attr.Range, - } - - val, diags := attr.Expr.Value(nil) - if diags.HasErrors() { - return ret, diags - } - var err error - val, err = convert.Convert(val, cty.String) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid version constraint", - Detail: fmt.Sprintf("A string value is required for %s.", attr.Name), - Subject: attr.Expr.Range().Ptr(), - }) - return ret, diags - } - - if val.IsNull() { - // A null version constraint is strange, but we'll just treat it - // like an empty constraint set. - return ret, diags - } - - if !val.IsWhollyKnown() { - // If there is a syntax error, HCL sets the value of the given attribute - // to cty.DynamicVal. A diagnostic for the syntax error will already - // bubble up, so we will move forward gracefully here. - return ret, diags - } - - constraintStr := val.AsString() - constraints, err := version.NewConstraint(constraintStr) - if err != nil { - // NewConstraint doesn't return user-friendly errors, so we'll just - // ignore the provided error and produce our own generic one. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid version constraint", - Detail: "This string does not use correct version constraint syntax.", // Not very actionable :( - Subject: attr.Expr.Range().Ptr(), - }) - return ret, diags - } - - ret.Required = constraints - return ret, diags -} diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go deleted file mode 100644 index 8ca4e910..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/dag.go +++ /dev/null @@ -1,354 +0,0 @@ -package dag - -import ( - "fmt" - "sort" - "strings" - - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/go-multierror" -) - -// AcyclicGraph is a specialization of Graph that cannot have cycles. With -// this property, we get the property of sane graph traversal. -type AcyclicGraph struct { - Graph -} - -// WalkFunc is the callback used for walking the graph. -type WalkFunc func(Vertex) tfdiags.Diagnostics - -// DepthWalkFunc is a walk function that also receives the current depth of the -// walk as an argument -type DepthWalkFunc func(Vertex, int) error - -func (g *AcyclicGraph) DirectedGraph() Grapher { - return g -} - -// Returns a Set that includes every Vertex yielded by walking down from the -// provided starting Vertex v. -func (g *AcyclicGraph) Ancestors(v Vertex) (Set, error) { - s := make(Set) - memoFunc := func(v Vertex, d int) error { - s.Add(v) - return nil - } - - if err := g.DepthFirstWalk(g.DownEdges(v), memoFunc); err != nil { - return nil, err - } - - return s, nil -} - -// Returns a Set that includes every Vertex yielded by walking up from the -// provided starting Vertex v. -func (g *AcyclicGraph) Descendents(v Vertex) (Set, error) { - s := make(Set) - memoFunc := func(v Vertex, d int) error { - s.Add(v) - return nil - } - - if err := g.ReverseDepthFirstWalk(g.UpEdges(v), memoFunc); err != nil { - return nil, err - } - - return s, nil -} - -// Root returns the root of the DAG, or an error. 
-// -// Complexity: O(V) -func (g *AcyclicGraph) Root() (Vertex, error) { - roots := make([]Vertex, 0, 1) - for _, v := range g.Vertices() { - if g.UpEdges(v).Len() == 0 { - roots = append(roots, v) - } - } - - if len(roots) > 1 { - // TODO(mitchellh): make this error message a lot better - return nil, fmt.Errorf("multiple roots: %#v", roots) - } - - if len(roots) == 0 { - return nil, fmt.Errorf("no roots found") - } - - return roots[0], nil -} - -// TransitiveReduction performs the transitive reduction of graph g in place. -// The transitive reduction of a graph is a graph with as few edges as -// possible with the same reachability as the original graph. This means -// that if there are three nodes A => B => C, and A connects to both -// B and C, and B connects to C, then the transitive reduction is the -// same graph with only a single edge between A and B, and a single edge -// between B and C. -// -// The graph must be valid for this operation to behave properly. If -// Validate() returns an error, the behavior is undefined and the results -// will likely be unexpected. -// -// Complexity: O(V(V+E)), or asymptotically O(VE) -func (g *AcyclicGraph) TransitiveReduction() { - // For each vertex u in graph g, do a DFS starting from each vertex - // v such that the edge (u,v) exists (v is a direct descendant of u). - // - // For each v-prime reachable from v, remove the edge (u, v-prime). - for _, u := range g.Vertices() { - uTargets := g.DownEdges(u) - - g.DepthFirstWalk(g.DownEdges(u), func(v Vertex, d int) error { - shared := uTargets.Intersection(g.DownEdges(v)) - for _, vPrime := range shared { - g.RemoveEdge(BasicEdge(u, vPrime)) - } - - return nil - }) - } -} - -// Validate validates the DAG. A DAG is valid if it has a single root -// with no cycles. -func (g *AcyclicGraph) Validate() error { - if _, err := g.Root(); err != nil { - return err - } - - // Look for cycles of more than 1 component - var err error - cycles := g.Cycles() - if len(cycles) > 0 { - for _, cycle := range cycles { - cycleStr := make([]string, len(cycle)) - for j, vertex := range cycle { - cycleStr[j] = VertexName(vertex) - } - - err = multierror.Append(err, fmt.Errorf( - "Cycle: %s", strings.Join(cycleStr, ", "))) - } - } - - // Look for cycles to self - for _, e := range g.Edges() { - if e.Source() == e.Target() { - err = multierror.Append(err, fmt.Errorf( - "Self reference: %s", VertexName(e.Source()))) - } - } - - return err -} - -func (g *AcyclicGraph) Cycles() [][]Vertex { - var cycles [][]Vertex - for _, cycle := range StronglyConnected(&g.Graph) { - if len(cycle) > 1 { - cycles = append(cycles, cycle) - } - } - return cycles -} - -// Walk walks the graph, calling your callback as each node is visited. -// This will walk nodes in parallel if it can. The resulting diagnostics -// contains problems from all graphs visited, in no particular order. -func (g *AcyclicGraph) Walk(cb WalkFunc) tfdiags.Diagnostics { - w := &Walker{Callback: cb, Reverse: true} - w.Update(g) - return w.Wait() -} - -// simple convenience helper for converting a dag.Set to a []Vertex -func AsVertexList(s Set) []Vertex { - vertexList := make([]Vertex, 0, len(s)) - for _, raw := range s { - vertexList = append(vertexList, raw.(Vertex)) - } - return vertexList -} - -type vertexAtDepth struct { - Vertex Vertex - Depth int -} - -// DepthFirstWalk does a depth-first walk of the graph starting from -// the vertices in start. 
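// A small worked example of the reduction described above, assuming string
// vertices; the function name exampleTransitiveReduction is illustrative
// only. Starting from A => B => C with a redundant direct A => C edge, the
// reduction leaves exactly A => B and B => C.
func exampleTransitiveReduction() {
	var g AcyclicGraph
	g.Add("A")
	g.Add("B")
	g.Add("C")
	g.Connect(BasicEdge("A", "B"))
	g.Connect(BasicEdge("B", "C"))
	g.Connect(BasicEdge("A", "C")) // redundant: C is already reachable via B
	g.TransitiveReduction()
	// g now contains only the edges A->B and B->C.
}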
-func (g *AcyclicGraph) DepthFirstWalk(start Set, f DepthWalkFunc) error { - seen := make(map[Vertex]struct{}) - frontier := make([]*vertexAtDepth, 0, len(start)) - for _, v := range start { - frontier = append(frontier, &vertexAtDepth{ - Vertex: v, - Depth: 0, - }) - } - for len(frontier) > 0 { - // Pop the current vertex - n := len(frontier) - current := frontier[n-1] - frontier = frontier[:n-1] - - // Check if we've seen this already and return... - if _, ok := seen[current.Vertex]; ok { - continue - } - seen[current.Vertex] = struct{}{} - - // Visit the current node - if err := f(current.Vertex, current.Depth); err != nil { - return err - } - - for _, v := range g.DownEdges(current.Vertex) { - frontier = append(frontier, &vertexAtDepth{ - Vertex: v, - Depth: current.Depth + 1, - }) - } - } - - return nil -} - -// SortedDepthFirstWalk does a depth-first walk of the graph starting from -// the vertices in start, always iterating the nodes in a consistent order. -func (g *AcyclicGraph) SortedDepthFirstWalk(start []Vertex, f DepthWalkFunc) error { - seen := make(map[Vertex]struct{}) - frontier := make([]*vertexAtDepth, len(start)) - for i, v := range start { - frontier[i] = &vertexAtDepth{ - Vertex: v, - Depth: 0, - } - } - for len(frontier) > 0 { - // Pop the current vertex - n := len(frontier) - current := frontier[n-1] - frontier = frontier[:n-1] - - // Check if we've seen this already and return... - if _, ok := seen[current.Vertex]; ok { - continue - } - seen[current.Vertex] = struct{}{} - - // Visit the current node - if err := f(current.Vertex, current.Depth); err != nil { - return err - } - - // Visit targets of this in a consistent order. - targets := AsVertexList(g.DownEdges(current.Vertex)) - sort.Sort(byVertexName(targets)) - - for _, t := range targets { - frontier = append(frontier, &vertexAtDepth{ - Vertex: t, - Depth: current.Depth + 1, - }) - } - } - - return nil -} - -// ReverseDepthFirstWalk does a depth-first walk _up_ the graph starting from -// the vertices in start. -func (g *AcyclicGraph) ReverseDepthFirstWalk(start Set, f DepthWalkFunc) error { - seen := make(map[Vertex]struct{}) - frontier := make([]*vertexAtDepth, 0, len(start)) - for _, v := range start { - frontier = append(frontier, &vertexAtDepth{ - Vertex: v, - Depth: 0, - }) - } - for len(frontier) > 0 { - // Pop the current vertex - n := len(frontier) - current := frontier[n-1] - frontier = frontier[:n-1] - - // Check if we've seen this already and return... - if _, ok := seen[current.Vertex]; ok { - continue - } - seen[current.Vertex] = struct{}{} - - for _, t := range g.UpEdges(current.Vertex) { - frontier = append(frontier, &vertexAtDepth{ - Vertex: t, - Depth: current.Depth + 1, - }) - } - - // Visit the current node - if err := f(current.Vertex, current.Depth); err != nil { - return err - } - } - - return nil -} - -// SortedReverseDepthFirstWalk does a depth-first walk _up_ the graph starting from -// the vertices in start, always iterating the nodes in a consistent order. -func (g *AcyclicGraph) SortedReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) error { - seen := make(map[Vertex]struct{}) - frontier := make([]*vertexAtDepth, len(start)) - for i, v := range start { - frontier[i] = &vertexAtDepth{ - Vertex: v, - Depth: 0, - } - } - for len(frontier) > 0 { - // Pop the current vertex - n := len(frontier) - current := frontier[n-1] - frontier = frontier[:n-1] - - // Check if we've seen this already and return... 
- if _, ok := seen[current.Vertex]; ok { - continue - } - seen[current.Vertex] = struct{}{} - - // Add next set of targets in a consistent order. - targets := AsVertexList(g.UpEdges(current.Vertex)) - sort.Sort(byVertexName(targets)) - for _, t := range targets { - frontier = append(frontier, &vertexAtDepth{ - Vertex: t, - Depth: current.Depth + 1, - }) - } - - // Visit the current node - if err := f(current.Vertex, current.Depth); err != nil { - return err - } - } - - return nil -} - -// byVertexName implements sort.Interface so a list of Vertices can be sorted -// consistently by their VertexName -type byVertexName []Vertex - -func (b byVertexName) Len() int { return len(b) } -func (b byVertexName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byVertexName) Less(i, j int) bool { - return VertexName(b[i]) < VertexName(b[j]) -} diff --git a/vendor/github.com/hashicorp/terraform/dag/dot.go b/vendor/github.com/hashicorp/terraform/dag/dot.go deleted file mode 100644 index 7e6d2af3..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/dot.go +++ /dev/null @@ -1,282 +0,0 @@ -package dag - -import ( - "bytes" - "fmt" - "sort" - "strings" -) - -// DotOpts are the options for generating a dot formatted Graph. -type DotOpts struct { - // Allows some nodes to decide to only show themselves when the user has - // requested the "verbose" graph. - Verbose bool - - // Highlight Cycles - DrawCycles bool - - // How many levels to expand modules as we draw - MaxDepth int - - // use this to keep the cluster_ naming convention from the previous dot writer - cluster bool -} - -// GraphNodeDotter can be implemented by a node to cause it to be included -// in the dot graph. The DotNode method will be called, which is expected to -// return a representation of this node. -type GraphNodeDotter interface { - // DotNode is called to return the dot formatting for the node. - // The first parameter is the title of the node. - // The second parameter includes user-specified options that affect the dot - // graph. See DotOpts above for details. - DotNode(string, *DotOpts) *DotNode -} - -// DotNode provides a structure for Vertices to return in order to specify their -// dot format. -type DotNode struct { - Name string - Attrs map[string]string -} - -// Returns the DOT representation of this Graph.
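// For orientation, output for a small root graph with a single edge looks
// roughly like the following (abridged; the vertex and edge lines depend on
// the options and on which vertices implement GraphNodeDotter):
//
//	digraph {
//		compound = "true"
//		newrank = "true"
//		subgraph "root" {
//			"[root] A" -> "[root] B"
//		}
//	}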
-func (g *marshalGraph) Dot(opts *DotOpts) []byte { - if opts == nil { - opts = &DotOpts{ - DrawCycles: true, - MaxDepth: -1, - Verbose: true, - } - } - - var w indentWriter - w.WriteString("digraph {\n") - w.Indent() - - // some dot defaults - w.WriteString(`compound = "true"` + "\n") - w.WriteString(`newrank = "true"` + "\n") - - // the top level graph is written as the first subgraph - w.WriteString(`subgraph "root" {` + "\n") - g.writeBody(opts, &w) - - // cluster isn't really used other than for naming purposes in some graphs - opts.cluster = opts.MaxDepth != 0 - maxDepth := opts.MaxDepth - if maxDepth == 0 { - maxDepth = -1 - } - - for _, s := range g.Subgraphs { - g.writeSubgraph(s, opts, maxDepth, &w) - } - - w.Unindent() - w.WriteString("}\n") - return w.Bytes() -} - -func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte { - var buf bytes.Buffer - graphName := g.Name - if graphName == "" { - graphName = "root" - } - - name := v.Name - attrs := v.Attrs - if v.graphNodeDotter != nil { - node := v.graphNodeDotter.DotNode(name, opts) - if node == nil { - return []byte{} - } - - newAttrs := make(map[string]string) - for k, v := range attrs { - newAttrs[k] = v - } - for k, v := range node.Attrs { - newAttrs[k] = v - } - - name = node.Name - attrs = newAttrs - } - - buf.WriteString(fmt.Sprintf(`"[%s] %s"`, graphName, name)) - writeAttrs(&buf, attrs) - buf.WriteByte('\n') - - return buf.Bytes() -} - -func (e *marshalEdge) dot(g *marshalGraph) string { - var buf bytes.Buffer - graphName := g.Name - if graphName == "" { - graphName = "root" - } - - sourceName := g.vertexByID(e.Source).Name - targetName := g.vertexByID(e.Target).Name - s := fmt.Sprintf(`"[%s] %s" -> "[%s] %s"`, graphName, sourceName, graphName, targetName) - buf.WriteString(s) - writeAttrs(&buf, e.Attrs) - - return buf.String() -} - -func cycleDot(e *marshalEdge, g *marshalGraph) string { - return e.dot(g) + ` [color = "red", penwidth = "2.0"]` -} - -// Write the subgraph body. This is recursive, and the depth argument is used to -// record the current depth of iteration.
-func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int, w *indentWriter) { - if depth == 0 { - return - } - depth-- - - name := sg.Name - if opts.cluster { - // we prefix with cluster_ to match the old dot output - name = "cluster_" + name - sg.Attrs["label"] = sg.Name - } - w.WriteString(fmt.Sprintf("subgraph %q {\n", name)) - sg.writeBody(opts, w) - - for _, sg := range sg.Subgraphs { - g.writeSubgraph(sg, opts, depth, w) - } -} - -func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) { - w.Indent() - - for _, as := range attrStrings(g.Attrs) { - w.WriteString(as + "\n") - } - - // list of Vertices that aren't to be included in the dot output - skip := map[string]bool{} - - for _, v := range g.Vertices { - if v.graphNodeDotter == nil { - skip[v.ID] = true - continue - } - - w.Write(v.dot(g, opts)) - } - - var dotEdges []string - - if opts.DrawCycles { - for _, c := range g.Cycles { - if len(c) < 2 { - continue - } - - for i, j := 0, 1; i < len(c); i, j = i+1, j+1 { - if j >= len(c) { - j = 0 - } - src := c[i] - tgt := c[j] - - if skip[src.ID] || skip[tgt.ID] { - continue - } - - e := &marshalEdge{ - Name: fmt.Sprintf("%s|%s", src.Name, tgt.Name), - Source: src.ID, - Target: tgt.ID, - Attrs: make(map[string]string), - } - - dotEdges = append(dotEdges, cycleDot(e, g)) - src = tgt - } - } - } - - for _, e := range g.Edges { - dotEdges = append(dotEdges, e.dot(g)) - } - - // sort these again to match the old output - sort.Strings(dotEdges) - - for _, e := range dotEdges { - w.WriteString(e + "\n") - } - - w.Unindent() - w.WriteString("}\n") -} - -func writeAttrs(buf *bytes.Buffer, attrs map[string]string) { - if len(attrs) > 0 { - buf.WriteString(" [") - buf.WriteString(strings.Join(attrStrings(attrs), ", ")) - buf.WriteString("]") - } -} - -func attrStrings(attrs map[string]string) []string { - strings := make([]string, 0, len(attrs)) - for k, v := range attrs { - strings = append(strings, fmt.Sprintf("%s = %q", k, v)) - } - sort.Strings(strings) - return strings -} - -// Provide a bytes.Buffer-like structure, which will indent when starting a -// newline. -type indentWriter struct { - bytes.Buffer - level int -} - -func (w *indentWriter) indent() { - newline := []byte("\n") - if !bytes.HasSuffix(w.Bytes(), newline) { - return - } - for i := 0; i < w.level; i++ { - w.Buffer.WriteString("\t") - } -} - -// Indent increases indentation by 1 -func (w *indentWriter) Indent() { w.level++ } - -// Unindent decreases indentation by 1 -func (w *indentWriter) Unindent() { w.level-- } - -// the following methods intercept the bytes.Buffer writes and insert the -// indentation when starting a new line. -func (w *indentWriter) Write(b []byte) (int, error) { - w.indent() - return w.Buffer.Write(b) -} - -func (w *indentWriter) WriteString(s string) (int, error) { - w.indent() - return w.Buffer.WriteString(s) -} -func (w *indentWriter) WriteByte(b byte) error { - w.indent() - return w.Buffer.WriteByte(b) -} -func (w *indentWriter) WriteRune(r rune) (int, error) { - w.indent() - return w.Buffer.WriteRune(r) -} diff --git a/vendor/github.com/hashicorp/terraform/dag/edge.go b/vendor/github.com/hashicorp/terraform/dag/edge.go deleted file mode 100644 index f0d99ee3..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/edge.go +++ /dev/null @@ -1,37 +0,0 @@ -package dag - -import ( - "fmt" -) - -// Edge represents an edge in the graph, with a source and target vertex.
-type Edge interface { - Source() Vertex - Target() Vertex - - Hashable -} - -// BasicEdge returns an Edge implementation that simply tracks the source -// and target given as-is. -func BasicEdge(source, target Vertex) Edge { - return &basicEdge{S: source, T: target} -} - -// basicEdge is a basic implementation of Edge that has the source and -// target vertex. -type basicEdge struct { - S, T Vertex -} - -func (e *basicEdge) Hashcode() interface{} { - return fmt.Sprintf("%p-%p", e.S, e.T) -} - -func (e *basicEdge) Source() Vertex { - return e.S -} - -func (e *basicEdge) Target() Vertex { - return e.T -} diff --git a/vendor/github.com/hashicorp/terraform/dag/graph.go b/vendor/github.com/hashicorp/terraform/dag/graph.go deleted file mode 100644 index 4ce0dbcc..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/graph.go +++ /dev/null @@ -1,329 +0,0 @@ -package dag - -import ( - "bytes" - "fmt" - "sort" -) - -// Graph is used to represent a dependency graph. -type Graph struct { - vertices Set - edges Set - downEdges map[interface{}]Set - upEdges map[interface{}]Set -} - -// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher. -type Subgrapher interface { - Subgraph() Grapher -} - -// A Grapher is any type that returns a Grapher, mainly used to identify -// dag.Graph and dag.AcyclicGraph. In the case of Graph and AcyclicGraph, they -// return themselves. -type Grapher interface { - DirectedGraph() Grapher -} - -// Vertex of the graph. -type Vertex interface{} - -// NamedVertex is an optional interface that can be implemented by Vertex -// to give it a human-friendly name that is used for outputting the graph. -type NamedVertex interface { - Vertex - Name() string -} - -func (g *Graph) DirectedGraph() Grapher { - return g -} - -// Vertices returns the list of all the vertices in the graph. -func (g *Graph) Vertices() []Vertex { - result := make([]Vertex, 0, len(g.vertices)) - for _, v := range g.vertices { - result = append(result, v.(Vertex)) - } - - return result -} - -// Edges returns the list of all the edges in the graph. -func (g *Graph) Edges() []Edge { - result := make([]Edge, 0, len(g.edges)) - for _, v := range g.edges { - result = append(result, v.(Edge)) - } - - return result -} - -// EdgesFrom returns the list of edges from the given source. -func (g *Graph) EdgesFrom(v Vertex) []Edge { - var result []Edge - from := hashcode(v) - for _, e := range g.Edges() { - if hashcode(e.Source()) == from { - result = append(result, e) - } - } - - return result -} - -// EdgesTo returns the list of edges to the given target. -func (g *Graph) EdgesTo(v Vertex) []Edge { - var result []Edge - search := hashcode(v) - for _, e := range g.Edges() { - if hashcode(e.Target()) == search { - result = append(result, e) - } - } - - return result -} - -// HasVertex checks if the given Vertex is present in the graph. -func (g *Graph) HasVertex(v Vertex) bool { - return g.vertices.Include(v) -} - -// HasEdge checks if the given Edge is present in the graph. -func (g *Graph) HasEdge(e Edge) bool { - return g.edges.Include(e) -} - -// Add adds a vertex to the graph. This is safe to call multiple times with -// the same Vertex. -func (g *Graph) Add(v Vertex) Vertex { - g.init() - g.vertices.Add(v) - return v -} - -// Remove removes a vertex from the graph. This will also remove any -// edges with this vertex as a source or target.
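// A small usage sketch of the mutation methods below, assuming string
// vertices; the function name exampleGraphOps is illustrative only.
func exampleGraphOps() []Vertex {
	var g Graph
	g.Add("A")
	g.Add("B")
	g.Connect(BasicEdge("A", "B")) // edge from source "A" to target "B"
	g.Remove("A")                  // also removes the A->B edge
	return g.Vertices()            // only "B" remains
}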
-func (g *Graph) Remove(v Vertex) Vertex { - // Delete the vertex itself - g.vertices.Delete(v) - - // Delete the edges to non-existent things - for _, target := range g.DownEdges(v) { - g.RemoveEdge(BasicEdge(v, target)) - } - for _, source := range g.UpEdges(v) { - g.RemoveEdge(BasicEdge(source, v)) - } - - return nil -} - -// Replace replaces the original Vertex with replacement. If the original -// does not exist within the graph, then false is returned. Otherwise, true -// is returned. -func (g *Graph) Replace(original, replacement Vertex) bool { - // If we don't have the original, we can't do anything - if !g.vertices.Include(original) { - return false - } - - // If they're the same, then don't do anything - if original == replacement { - return true - } - - // Add our new vertex, then copy all the edges - g.Add(replacement) - for _, target := range g.DownEdges(original) { - g.Connect(BasicEdge(replacement, target)) - } - for _, source := range g.UpEdges(original) { - g.Connect(BasicEdge(source, replacement)) - } - - // Remove our old vertex, which will also remove all the edges - g.Remove(original) - - return true -} - -// RemoveEdge removes an edge from the graph. -func (g *Graph) RemoveEdge(edge Edge) { - g.init() - - // Delete the edge from the set - g.edges.Delete(edge) - - // Delete the up/down edges - if s, ok := g.downEdges[hashcode(edge.Source())]; ok { - s.Delete(edge.Target()) - } - if s, ok := g.upEdges[hashcode(edge.Target())]; ok { - s.Delete(edge.Source()) - } -} - -// DownEdges returns the outward edges from the source Vertex v. -func (g *Graph) DownEdges(v Vertex) Set { - g.init() - return g.downEdges[hashcode(v)] -} - -// UpEdges returns the inward edges to the destination Vertex v. -func (g *Graph) UpEdges(v Vertex) Set { - g.init() - return g.upEdges[hashcode(v)] -} - -// Connect adds an edge with the given source and target. This is safe to -// call multiple times with the same value. Note that the same value is -// verified through pointer equality of the vertices, not through the -// value of the edge itself. -func (g *Graph) Connect(edge Edge) { - g.init() - - source := edge.Source() - target := edge.Target() - sourceCode := hashcode(source) - targetCode := hashcode(target) - - // Do we have this already? If so, don't add it again. - if s, ok := g.downEdges[sourceCode]; ok && s.Include(target) { - return - } - - // Add the edge to the set - g.edges.Add(edge) - - // Add the down edge - s, ok := g.downEdges[sourceCode] - if !ok { - s = make(Set) - g.downEdges[sourceCode] = s - } - s.Add(target) - - // Add the up edge - s, ok = g.upEdges[targetCode] - if !ok { - s = make(Set) - g.upEdges[targetCode] = s - } - s.Add(source) -} - -// String outputs some human-friendly output for the graph structure. -func (g *Graph) StringWithNodeTypes() string { - var buf bytes.Buffer - - // Build the list of node names and a mapping so that we can more - // easily alphabetize the output to remain deterministic. - vertices := g.Vertices() - names := make([]string, 0, len(vertices)) - mapping := make(map[string]Vertex, len(vertices)) - for _, v := range vertices { - name := VertexName(v) - names = append(names, name) - mapping[name] = v - } - sort.Strings(names) - - // Write each node in order... 
- for _, name := range names { - v := mapping[name] - targets := g.downEdges[hashcode(v)] - - buf.WriteString(fmt.Sprintf("%s - %T\n", name, v)) - - // Alphabetize dependencies - deps := make([]string, 0, targets.Len()) - targetNodes := make(map[string]Vertex) - for _, target := range targets { - dep := VertexName(target) - deps = append(deps, dep) - targetNodes[dep] = target - } - sort.Strings(deps) - - // Write dependencies - for _, d := range deps { - buf.WriteString(fmt.Sprintf(" %s - %T\n", d, targetNodes[d])) - } - } - - return buf.String() -} - -// String outputs some human-friendly output for the graph structure. -func (g *Graph) String() string { - var buf bytes.Buffer - - // Build the list of node names and a mapping so that we can more - // easily alphabetize the output to remain deterministic. - vertices := g.Vertices() - names := make([]string, 0, len(vertices)) - mapping := make(map[string]Vertex, len(vertices)) - for _, v := range vertices { - name := VertexName(v) - names = append(names, name) - mapping[name] = v - } - sort.Strings(names) - - // Write each node in order... - for _, name := range names { - v := mapping[name] - targets := g.downEdges[hashcode(v)] - - buf.WriteString(fmt.Sprintf("%s\n", name)) - - // Alphabetize dependencies - deps := make([]string, 0, targets.Len()) - for _, target := range targets { - deps = append(deps, VertexName(target)) - } - sort.Strings(deps) - - // Write dependencies - for _, d := range deps { - buf.WriteString(fmt.Sprintf(" %s\n", d)) - } - } - - return buf.String() -} - -func (g *Graph) init() { - if g.vertices == nil { - g.vertices = make(Set) - } - if g.edges == nil { - g.edges = make(Set) - } - if g.downEdges == nil { - g.downEdges = make(map[interface{}]Set) - } - if g.upEdges == nil { - g.upEdges = make(map[interface{}]Set) - } -} - -// Dot returns a dot-formatted representation of the Graph. -func (g *Graph) Dot(opts *DotOpts) []byte { - return newMarshalGraph("", g).Dot(opts) -} - -// VertexName returns the name of a vertex. -func VertexName(raw Vertex) string { - switch v := raw.(type) { - case NamedVertex: - return v.Name() - case fmt.Stringer: - return fmt.Sprintf("%s", v) - default: - return fmt.Sprintf("%v", v) - } -} diff --git a/vendor/github.com/hashicorp/terraform/dag/marshal.go b/vendor/github.com/hashicorp/terraform/dag/marshal.go deleted file mode 100644 index ebb8a0a6..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/marshal.go +++ /dev/null @@ -1,232 +0,0 @@ -package dag - -import ( - "fmt" - "reflect" - "sort" - "strconv" -) - -const ( - typeOperation = "Operation" - typeTransform = "Transform" - typeWalk = "Walk" - typeDepthFirstWalk = "DepthFirstWalk" - typeReverseDepthFirstWalk = "ReverseDepthFirstWalk" - typeTransitiveReduction = "TransitiveReduction" - typeEdgeInfo = "EdgeInfo" - typeVertexInfo = "VertexInfo" - typeVisitInfo = "VisitInfo" -) - -// the marshal* structs are for serialization of the graph data. -type marshalGraph struct { - // Type is always "Graph", for identification as a top level object in the - // JSON stream. - Type string - - // Each marshal structure requires a unique ID so that it can be referenced - // by other structures. - ID string `json:",omitempty"` - - // Human readable name for this graph. - Name string `json:",omitempty"` - - // Arbitrary attributes that can be added to the output. - Attrs map[string]string `json:",omitempty"` - - // List of graph vertices, sorted by ID. - Vertices []*marshalVertex `json:",omitempty"` - - // List of edges, sorted by Source ID. 
- Edges []*marshalEdge `json:",omitempty"` - - // Any number of subgraphs. A subgraph itself is considered a vertex, and - // may be referenced by either end of an edge. - Subgraphs []*marshalGraph `json:",omitempty"` - - // Any lists of vertices that are included in cycles. - Cycles [][]*marshalVertex `json:",omitempty"` -} - -// The add, remove, connect, removeEdge methods mirror the basic Graph -// manipulations to reconstruct a marshalGraph from a debug log. -func (g *marshalGraph) add(v *marshalVertex) { - g.Vertices = append(g.Vertices, v) - sort.Sort(vertices(g.Vertices)) -} - -func (g *marshalGraph) remove(v *marshalVertex) { - for i, existing := range g.Vertices { - if v.ID == existing.ID { - g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...) - return - } - } -} - -func (g *marshalGraph) connect(e *marshalEdge) { - g.Edges = append(g.Edges, e) - sort.Sort(edges(g.Edges)) -} - -func (g *marshalGraph) removeEdge(e *marshalEdge) { - for i, existing := range g.Edges { - if e.Source == existing.Source && e.Target == existing.Target { - g.Edges = append(g.Edges[:i], g.Edges[i+1:]...) - return - } - } -} - -func (g *marshalGraph) vertexByID(id string) *marshalVertex { - for _, v := range g.Vertices { - if id == v.ID { - return v - } - } - return nil -} - -type marshalVertex struct { - // Unique ID, used to reference this vertex from other structures. - ID string - - // Human readable name - Name string `json:",omitempty"` - - Attrs map[string]string `json:",omitempty"` - - // This is to help transition from the old Dot interfaces. We record if the - // node was a GraphNodeDotter here, so we can call it to get attributes. - graphNodeDotter GraphNodeDotter -} - -func newMarshalVertex(v Vertex) *marshalVertex { - dn, ok := v.(GraphNodeDotter) - if !ok { - dn = nil - } - - return &marshalVertex{ - ID: marshalVertexID(v), - Name: VertexName(v), - Attrs: make(map[string]string), - graphNodeDotter: dn, - } -} - -// vertices is a sort.Interface implementation for sorting vertices by ID -type vertices []*marshalVertex - -func (v vertices) Less(i, j int) bool { return v[i].Name < v[j].Name } -func (v vertices) Len() int { return len(v) } -func (v vertices) Swap(i, j int) { v[i], v[j] = v[j], v[i] } - -type marshalEdge struct { - // Human readable name - Name string - - // Source and Target Vertices by ID - Source string - Target string - - Attrs map[string]string `json:",omitempty"` -} - -func newMarshalEdge(e Edge) *marshalEdge { - return &marshalEdge{ - Name: fmt.Sprintf("%s|%s", VertexName(e.Source()), VertexName(e.Target())), - Source: marshalVertexID(e.Source()), - Target: marshalVertexID(e.Target()), - Attrs: make(map[string]string), - } -} - -// edges is a sort.Interface implementation for sorting edges by Source ID -type edges []*marshalEdge - -func (e edges) Less(i, j int) bool { return e[i].Name < e[j].Name } -func (e edges) Len() int { return len(e) } -func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } - -// build a marshalGraph structure from a *Graph -func newMarshalGraph(name string, g *Graph) *marshalGraph { - mg := &marshalGraph{ - Type: "Graph", - Name: name, - Attrs: make(map[string]string), - } - - for _, v := range g.Vertices() { - id := marshalVertexID(v) - if sg, ok := marshalSubgrapher(v); ok { - smg := newMarshalGraph(VertexName(v), sg) - smg.ID = id - mg.Subgraphs = append(mg.Subgraphs, smg) - } - - mv := newMarshalVertex(v) - mg.Vertices = append(mg.Vertices, mv) - } - - sort.Sort(vertices(mg.Vertices)) - - for _, e := range g.Edges() { - mg.Edges = 
append(mg.Edges, newMarshalEdge(e)) - } - - sort.Sort(edges(mg.Edges)) - - for _, c := range (&AcyclicGraph{*g}).Cycles() { - var cycle []*marshalVertex - for _, v := range c { - mv := newMarshalVertex(v) - cycle = append(cycle, mv) - } - mg.Cycles = append(mg.Cycles, cycle) - } - - return mg -} - -// Attempt to return a unique ID for any vertex. -func marshalVertexID(v Vertex) string { - val := reflect.ValueOf(v) - switch val.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - return strconv.Itoa(int(val.Pointer())) - case reflect.Interface: - return strconv.Itoa(int(val.InterfaceData()[1])) - } - - if v, ok := v.(Hashable); ok { - h := v.Hashcode() - if h, ok := h.(string); ok { - return h - } - } - - // fallback to a name, which we hope is unique. - return VertexName(v) - - // we could try harder by attempting to read the arbitrary value from the - // interface, but we shouldn't get here from terraform right now. -} - -// check for a Subgrapher, and return the underlying *Graph. -func marshalSubgrapher(v Vertex) (*Graph, bool) { - sg, ok := v.(Subgrapher) - if !ok { - return nil, false - } - - switch g := sg.Subgraph().DirectedGraph().(type) { - case *Graph: - return g, true - case *AcyclicGraph: - return &g.Graph, true - } - - return nil, false -} diff --git a/vendor/github.com/hashicorp/terraform/dag/set.go b/vendor/github.com/hashicorp/terraform/dag/set.go deleted file mode 100644 index f3fd704b..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/set.go +++ /dev/null @@ -1,105 +0,0 @@ -package dag - -// Set is a set data structure. -type Set map[interface{}]interface{} - -// Hashable is the interface used by set to get the hash code of a value. -// If this isn't given, then the value of the item being added to the set -// itself is used as the comparison value. -type Hashable interface { - Hashcode() interface{} -} - -// hashcode returns the hashcode used for set elements. -func hashcode(v interface{}) interface{} { - if h, ok := v.(Hashable); ok { - return h.Hashcode() - } - - return v -} - -// Add adds an item to the set -func (s Set) Add(v interface{}) { - s[hashcode(v)] = v -} - -// Delete removes an item from the set. -func (s Set) Delete(v interface{}) { - delete(s, hashcode(v)) -} - -// Include returns true/false of whether a value is in the set. -func (s Set) Include(v interface{}) bool { - _, ok := s[hashcode(v)] - return ok -} - -// Intersection computes the set intersection with other. -func (s Set) Intersection(other Set) Set { - result := make(Set) - if s == nil { - return result - } - if other != nil { - for _, v := range s { - if other.Include(v) { - result.Add(v) - } - } - } - - return result -} - -// Difference returns a set with the elements that s has but -// other doesn't. -func (s Set) Difference(other Set) Set { - result := make(Set) - if s != nil { - for k, v := range s { - var ok bool - if other != nil { - _, ok = other[k] - } - if !ok { - result.Add(v) - } - } - } - - return result -} - -// Filter returns a set that contains the elements from the receiver -// where the given callback returns true. -func (s Set) Filter(cb func(interface{}) bool) Set { - result := make(Set) - - for _, v := range s { - if cb(v) { - result.Add(v) - } - } - - return result -} - -// Len is the number of items in the set. -func (s Set) Len() int { - return len(s) -} - -// List returns the list of set elements. 
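// A brief sketch of the set operations above, assuming plain string
// elements, which hash as themselves because they do not implement
// Hashable; the function name exampleSet is illustrative only.
func exampleSet() bool {
	a := make(Set)
	a.Add("x")
	a.Add("y")
	b := make(Set)
	b.Add("y")
	return a.Intersection(b).Include("y") // true; "x" is filtered out
}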
-func (s Set) List() []interface{} { - if s == nil { - return nil - } - - r := make([]interface{}, 0, len(s)) - for _, v := range s { - r = append(r, v) - } - - return r -} diff --git a/vendor/github.com/hashicorp/terraform/dag/tarjan.go b/vendor/github.com/hashicorp/terraform/dag/tarjan.go deleted file mode 100644 index 330abd58..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/tarjan.go +++ /dev/null @@ -1,107 +0,0 @@ -package dag - -// StronglyConnected returns the list of strongly connected components -// within the Graph g. This information is primarily used by this package -// for cycle detection, but strongly connected components have widespread -// use. -func StronglyConnected(g *Graph) [][]Vertex { - vs := g.Vertices() - acct := sccAcct{ - NextIndex: 1, - VertexIndex: make(map[Vertex]int, len(vs)), - } - for _, v := range vs { - // Recurse on any non-visited nodes - if acct.VertexIndex[v] == 0 { - stronglyConnected(&acct, g, v) - } - } - return acct.SCC -} - -func stronglyConnected(acct *sccAcct, g *Graph, v Vertex) int { - // Initial vertex visit - index := acct.visit(v) - minIdx := index - - for _, raw := range g.DownEdges(v) { - target := raw.(Vertex) - targetIdx := acct.VertexIndex[target] - - // Recurse on successor if not yet visited - if targetIdx == 0 { - minIdx = min(minIdx, stronglyConnected(acct, g, target)) - } else if acct.inStack(target) { - // Check if the vertex is in the stack - minIdx = min(minIdx, targetIdx) - } - } - - // Pop the strongly connected components off the stack if - // this is a root vertex - if index == minIdx { - var scc []Vertex - for { - v2 := acct.pop() - scc = append(scc, v2) - if v2 == v { - break - } - } - - acct.SCC = append(acct.SCC, scc) - } - - return minIdx -} - -func min(a, b int) int { - if a <= b { - return a - } - return b -} - -// sccAcct is used to pass around accounting information for -// the StronglyConnectedComponents algorithm -type sccAcct struct { - NextIndex int - VertexIndex map[Vertex]int - Stack []Vertex - SCC [][]Vertex -} - -// visit assigns an index and pushes a vertex onto the stack -func (s *sccAcct) visit(v Vertex) int { - idx := s.NextIndex - s.VertexIndex[v] = idx - s.NextIndex++ - s.push(v) - return idx -} - -// push adds a vertex to the stack -func (s *sccAcct) push(n Vertex) { - s.Stack = append(s.Stack, n) -} - -// pop removes a vertex from the stack -func (s *sccAcct) pop() Vertex { - n := len(s.Stack) - if n == 0 { - return nil - } - vertex := s.Stack[n-1] - s.Stack = s.Stack[:n-1] - return vertex -} - -// inStack checks if a vertex is in the stack -func (s *sccAcct) inStack(needle Vertex) bool { - for _, n := range s.Stack { - if n == needle { - return true - } - } - return false -} diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go deleted file mode 100644 index d7b202d7..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/walk.go +++ /dev/null @@ -1,454 +0,0 @@ -package dag - -import ( - "errors" - "log" - "sync" - "time" - - "github.com/hashicorp/terraform/tfdiags" -) - -// Walker is used to walk every vertex of a graph in parallel. -// -// A vertex will only be walked when the dependencies of that vertex have -// been walked. If two vertices can be walked at the same time, they will be. -// -// Update can be called to update the graph. This can be called even during -// a walk, changing vertices/edges mid-walk. This should be done carefully.
-// If a vertex is removed but has already been executed, the result of that -// execution (any error) is still returned by Wait. Changing or re-adding -// a vertex that has already executed has no effect. Changing edges of -// a vertex that has already executed has no effect. -// -// Non-parallelism can be enforced by introducing a lock in your callback -// function. However, the goroutine overhead of a walk will remain. -// Walker will create V*2 goroutines (one for each vertex, and dependency -// waiter for each vertex). In general this should be of no concern unless -// there are a huge number of vertices. -// -// The walk is depth first by default. This can be changed with the Reverse -// option. -// -// A single walker is only valid for one graph walk. After the walk is complete -// you must construct a new walker to walk again. State for the walk is never -// deleted in case vertices or edges are changed. -type Walker struct { - // Callback is what is called for each vertex - Callback WalkFunc - - // Reverse, if true, causes the source of an edge to depend on a target. - // When false (default), the target depends on the source. - Reverse bool - - // changeLock must be held to modify any of the fields below. Only Update - // should modify these fields. Modifying them outside of Update can cause - // serious problems. - changeLock sync.Mutex - vertices Set - edges Set - vertexMap map[Vertex]*walkerVertex - - // wait is done when all vertices have executed. It may become "undone" - // if new vertices are added. - wait sync.WaitGroup - - // diagsMap contains the diagnostics recorded so far for execution, - // and upstreamFailed contains all the vertices whose problems were - // caused by upstream failures, and thus whose diagnostics should be - // excluded from the final set. - // - // Readers and writers of either map must hold diagsLock. - diagsMap map[Vertex]tfdiags.Diagnostics - upstreamFailed map[Vertex]struct{} - diagsLock sync.Mutex -} - -func (w *Walker) init() { - if w.vertices == nil { - w.vertices = make(Set) - } - if w.edges == nil { - w.edges = make(Set) - } -} - -type walkerVertex struct { - // These should only be set once on initialization and never written again. - // They are not protected by a lock since they don't need to be since - // they are write-once. - - // DoneCh is closed when this vertex has completed execution, regardless - // of success. - // - // CancelCh is closed when the vertex should cancel execution. If execution - // is already complete (DoneCh is closed), this has no effect. Otherwise, - // execution is cancelled as quickly as possible. - DoneCh chan struct{} - CancelCh chan struct{} - - // Dependency information. Any changes to any of these fields requires - // holding DepsLock. - // - // DepsCh is sent a single value that denotes whether the upstream deps - // were successful (no errors). Any value sent means that the upstream - // dependencies are complete. No other values will ever be sent again. - // - // DepsUpdateCh is closed when there is a new DepsCh set. - DepsCh chan bool - DepsUpdateCh chan struct{} - DepsLock sync.Mutex - - // Below is not safe to read/write in parallel. This behavior is - // enforced by changes only happening in Update. Nothing else should - // ever modify these. - deps map[Vertex]chan struct{} - depsCancelCh chan struct{} -} - -// errWalkUpstream is used in the errMap of a walk to note that an upstream -// dependency failed so this vertex wasn't run. This is not shown in the final -// user-returned error. 
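// A usage sketch, assuming a fully-populated AcyclicGraph and a caller's
// WalkFunc; the function name walkGraph is illustrative only. The walk
// starts as soon as Update is called; Wait only blocks for completion and
// collects the diagnostics. Set Reverse to flip the dependency direction,
// as AcyclicGraph.Walk does.
func walkGraph(g *AcyclicGraph, cb WalkFunc) tfdiags.Diagnostics {
	w := &Walker{Callback: cb}
	w.Update(g)
	return w.Wait()
}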
-var errWalkUpstream = errors.New("upstream dependency failed") - -// Wait waits for the completion of the walk and returns diagnostics describing -// any problems that arose. Update should be called to populate the walk with -// vertices and edges prior to calling this. -// -// Wait will return as soon as all currently known vertices are complete. -// If you plan on calling Update with more vertices in the future, you -// should not call Wait until after this is done. -func (w *Walker) Wait() tfdiags.Diagnostics { - // Wait for completion - w.wait.Wait() - - var diags tfdiags.Diagnostics - w.diagsLock.Lock() - for v, vDiags := range w.diagsMap { - if _, upstream := w.upstreamFailed[v]; upstream { - // Ignore diagnostics for nodes that had failed upstreams, since - // the downstream diagnostics are likely to be redundant. - continue - } - diags = diags.Append(vDiags) - } - w.diagsLock.Unlock() - - return diags -} - -// Update updates the currently executing walk with the given graph. -// This will perform a diff of the vertices and edges and update the walker. -// Already completed vertices remain completed (including any errors during -// their execution). -// -// This returns immediately once the walker is updated; it does not wait -// for completion of the walk. -// -// Multiple Updates can be called in parallel. Update can be called at any -// time during a walk. -func (w *Walker) Update(g *AcyclicGraph) { - w.init() - v := make(Set) - e := make(Set) - if g != nil { - v, e = g.vertices, g.edges - } - - // Grab the change lock so no more updates happen but also so that - // no new vertices are executed during this time since we may be - // removing them. - w.changeLock.Lock() - defer w.changeLock.Unlock() - - // Initialize fields - if w.vertexMap == nil { - w.vertexMap = make(map[Vertex]*walkerVertex) - } - - // Calculate all our sets - newEdges := e.Difference(w.edges) - oldEdges := w.edges.Difference(e) - newVerts := v.Difference(w.vertices) - oldVerts := w.vertices.Difference(v) - - // Add the new vertices - for _, raw := range newVerts { - v := raw.(Vertex) - - // Add to the waitgroup so our walk is not done until everything finishes - w.wait.Add(1) - - // Add to our own set so we know about it already - w.vertices.Add(raw) - - // Initialize the vertex info - info := &walkerVertex{ - DoneCh: make(chan struct{}), - CancelCh: make(chan struct{}), - deps: make(map[Vertex]chan struct{}), - } - - // Add it to the map and kick off the walk - w.vertexMap[v] = info - } - - // Remove the old vertices - for _, raw := range oldVerts { - v := raw.(Vertex) - - // Get the vertex info so we can cancel it - info, ok := w.vertexMap[v] - if !ok { - // This vertex for some reason was never in our map. This - // shouldn't be possible. - continue - } - - // Cancel the vertex - close(info.CancelCh) - - // Delete it out of the map - delete(w.vertexMap, v) - w.vertices.Delete(raw) - } - - // Add the new edges - changedDeps := make(Set) - for _, raw := range newEdges { - edge := raw.(Edge) - waiter, dep := w.edgeParts(edge) - - // Get the info for the waiter - waiterInfo, ok := w.vertexMap[waiter] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. - continue - } - - // Get the info for the dep - depInfo, ok := w.vertexMap[dep] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. 
- continue - } - - // Add the dependency to our waiter - waiterInfo.deps[dep] = depInfo.DoneCh - - // Record that the deps changed for this waiter - changedDeps.Add(waiter) - w.edges.Add(raw) - } - - // Process removed edges - for _, raw := range oldEdges { - edge := raw.(Edge) - waiter, dep := w.edgeParts(edge) - - // Get the info for the waiter - waiterInfo, ok := w.vertexMap[waiter] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. - continue - } - - // Delete the dependency from the waiter - delete(waiterInfo.deps, dep) - - // Record that the deps changed for this waiter - changedDeps.Add(waiter) - w.edges.Delete(raw) - } - - // For each vertex with changed dependencies, we need to kick off - // a new waiter and notify the vertex of the changes. - for _, raw := range changedDeps { - v := raw.(Vertex) - info, ok := w.vertexMap[v] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. - continue - } - - // Create a new done channel - doneCh := make(chan bool, 1) - - // Create the channel we close for cancellation - cancelCh := make(chan struct{}) - - // Build a new deps copy - deps := make(map[Vertex]<-chan struct{}) - for k, v := range info.deps { - deps[k] = v - } - - // Update the update channel - info.DepsLock.Lock() - if info.DepsUpdateCh != nil { - close(info.DepsUpdateCh) - } - info.DepsCh = doneCh - info.DepsUpdateCh = make(chan struct{}) - info.DepsLock.Unlock() - - // Cancel the older waiter - if info.depsCancelCh != nil { - close(info.depsCancelCh) - } - info.depsCancelCh = cancelCh - - // Start the waiter - go w.waitDeps(v, deps, doneCh, cancelCh) - } - - // Start all the new vertices. We do this at the end so that all - // the edge waiters and changes are setup above. - for _, raw := range newVerts { - v := raw.(Vertex) - go w.walkVertex(v, w.vertexMap[v]) - } -} - -// edgeParts returns the waiter and the dependency, in that order. -// The waiter is waiting on the dependency. -func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) { - if w.Reverse { - return e.Source(), e.Target() - } - - return e.Target(), e.Source() -} - -// walkVertex walks a single vertex, waiting for any dependencies before -// executing the callback. -func (w *Walker) walkVertex(v Vertex, info *walkerVertex) { - // When we're done executing, lower the waitgroup count - defer w.wait.Done() - - // When we're done, always close our done channel - defer close(info.DoneCh) - - // Wait for our dependencies. We create a [closed] deps channel so - // that we can immediately fall through to load our actual DepsCh. - var depsSuccess bool - var depsUpdateCh chan struct{} - depsCh := make(chan bool, 1) - depsCh <- true - close(depsCh) - for { - select { - case <-info.CancelCh: - // Cancel - return - - case depsSuccess = <-depsCh: - // Deps complete! Mark as nil to trigger completion handling. - depsCh = nil - - case <-depsUpdateCh: - // New deps, reloop - } - - // Check if we have updated dependencies. This can happen if the - // dependencies were satisfied exactly prior to an Update occurring. - // In that case, we'd like to take into account new dependencies - // if possible. - info.DepsLock.Lock() - if info.DepsCh != nil { - depsCh = info.DepsCh - info.DepsCh = nil - } - if info.DepsUpdateCh != nil { - depsUpdateCh = info.DepsUpdateCh - } - info.DepsLock.Unlock() - - // If we still have no deps channel set, then we're done! 
- if depsCh == nil { - break - } - } - - // If we passed dependencies, we just want to check once more that - // we're not cancelled, since this can happen just as dependencies pass. - select { - case <-info.CancelCh: - // Cancelled during an update while dependencies completed. - return - default: - } - - // Run our callback or note that our upstream failed - var diags tfdiags.Diagnostics - var upstreamFailed bool - if depsSuccess { - log.Printf("[TRACE] dag/walk: visiting %q", VertexName(v)) - diags = w.Callback(v) - } else { - log.Printf("[TRACE] dag/walk: upstream of %q errored, so skipping", VertexName(v)) - // This won't be displayed to the user because we'll set upstreamFailed, - // but we need to ensure there's at least one error in here so that - // the failures will cascade downstream. - diags = diags.Append(errors.New("upstream dependencies failed")) - upstreamFailed = true - } - - // Record the result (we must do this after execution because we mustn't - // hold diagsLock while visiting a vertex.) - w.diagsLock.Lock() - if w.diagsMap == nil { - w.diagsMap = make(map[Vertex]tfdiags.Diagnostics) - } - w.diagsMap[v] = diags - if w.upstreamFailed == nil { - w.upstreamFailed = make(map[Vertex]struct{}) - } - if upstreamFailed { - w.upstreamFailed[v] = struct{}{} - } - w.diagsLock.Unlock() -} - -func (w *Walker) waitDeps( - v Vertex, - deps map[Vertex]<-chan struct{}, - doneCh chan<- bool, - cancelCh <-chan struct{}) { - - // For each dependency given to us, wait for it to complete - for dep, depCh := range deps { - DepSatisfied: - for { - select { - case <-depCh: - // Dependency satisfied! - break DepSatisfied - - case <-cancelCh: - // Wait cancelled. Note that we didn't satisfy dependencies - // so that anything waiting on us also doesn't run. - doneCh <- false - return - - case <-time.After(time.Second * 5): - log.Printf("[TRACE] dag/walk: vertex %q is waiting for %q", - VertexName(v), VertexName(dep)) - } - } - } - - // Dependencies satisfied! We need to check if any errored - w.diagsLock.Lock() - defer w.diagsLock.Unlock() - for dep := range deps { - if w.diagsMap[dep].HasErrors() { - // One of our dependencies failed, so return false - doneCh <- false - return - } - } - - // All dependencies satisfied and successful - doneCh <- true -} diff --git a/vendor/github.com/hashicorp/terraform/experiments/doc.go b/vendor/github.com/hashicorp/terraform/experiments/doc.go deleted file mode 100644 index 5538d739..00000000 --- a/vendor/github.com/hashicorp/terraform/experiments/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Package experiments contains the models and logic for opt-in experiments -// that can be activated for a particular Terraform module. -// -// We use experiments to get feedback on new configuration language features -// in a way that permits breaking changes without waiting for a future minor -// release. Any feature behind an experiment flag is subject to change in any -// way in even a patch release, until we have enough confidence about the -// design of the feature to make compatibility commitments about it. -package experiments diff --git a/vendor/github.com/hashicorp/terraform/experiments/errors.go b/vendor/github.com/hashicorp/terraform/experiments/errors.go deleted file mode 100644 index a1fdc6f5..00000000 --- a/vendor/github.com/hashicorp/terraform/experiments/errors.go +++ /dev/null @@ -1,26 +0,0 @@ -package experiments - -import ( - "fmt" -) - -// UnavailableError is the error type returned by GetCurrent when the requested -// experiment is not recognized at all. 
-type UnavailableError struct {
-	ExperimentName string
-}
-
-func (e UnavailableError) Error() string {
-	return fmt.Sprintf("no current experiment is named %q", e.ExperimentName)
-}
-
-// ConcludedError is the error type returned by GetCurrent when the requested
-// experiment is recognized as concluded.
-type ConcludedError struct {
-	ExperimentName string
-	Message        string
-}
-
-func (e ConcludedError) Error() string {
-	return fmt.Sprintf("experiment %q has concluded: %s", e.ExperimentName, e.Message)
-}
diff --git a/vendor/github.com/hashicorp/terraform/experiments/experiment.go b/vendor/github.com/hashicorp/terraform/experiments/experiment.go
deleted file mode 100644
index cac7d54f..00000000
--- a/vendor/github.com/hashicorp/terraform/experiments/experiment.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package experiments
-
-// Experiment represents a particular experiment, which can be activated
-// independently of all other experiments.
-type Experiment string
-
-// All active and defunct experiments must be represented by constants whose
-// internal string values are unique.
-//
-// Each of these declared constants must also be registered as either a
-// current or a defunct experiment in the init() function below.
-//
-// Each experiment is represented by a string that must be a valid HCL
-// identifier so that it can be specified in configuration.
-const (
-	VariableValidation = Experiment("variable_validation")
-)
-
-func init() {
-	// Each experiment constant defined above must be registered here as either
-	// a current or a concluded experiment.
-	registerConcludedExperiment(VariableValidation, "Custom variable validation can now be used by default, without enabling an experiment.")
-}
-
-// GetCurrent takes an experiment name and returns the experiment value
-// representing that experiment if and only if it is a current experiment.
-//
-// If the selected experiment is concluded, GetCurrent will return an
-// error of type ConcludedError whose message hopefully includes some guidance
-// for users of the experiment on how to migrate to a stable feature that
-// succeeded it.
-//
-// If the selected experiment is not known at all, GetCurrent will return an
-// error of type UnavailableError.
-func GetCurrent(name string) (Experiment, error) {
-	exp := Experiment(name)
-	if currentExperiments.Has(exp) {
-		return exp, nil
-	}
-
-	if msg, concluded := concludedExperiments[exp]; concluded {
-		return Experiment(""), ConcludedError{ExperimentName: name, Message: msg}
-	}
-
-	return Experiment(""), UnavailableError{ExperimentName: name}
-}
-
-// Keyword returns the keyword that would be used to activate this experiment
-// in the configuration.
-func (e Experiment) Keyword() string {
-	return string(e)
-}
-
-// IsCurrent returns true if the receiver is considered a currently-selectable
-// experiment.
-func (e Experiment) IsCurrent() bool {
-	return currentExperiments.Has(e)
-}
-
-// IsConcluded returns true if the receiver is a concluded experiment.
-func (e Experiment) IsConcluded() bool {
-	_, exists := concludedExperiments[e]
-	return exists
-}
-
-// currentExperiments are those which are available to activate in the current
-// version of Terraform.
-//
-// Members of this set are registered in the init function above.
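[Reviewer note: a short sketch of how callers are expected to consume GetCurrent together with the two error types from errors.go above; everything used here appears in the deleted code.]

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/experiments"
)

func main() {
	exp, err := experiments.GetCurrent("variable_validation")
	switch e := err.(type) {
	case nil:
		fmt.Printf("experiment %s is active\n", exp.Keyword())
	case experiments.ConcludedError:
		// Concluded experiments carry guidance for migrating off the flag.
		fmt.Println("concluded:", e.Message)
	case experiments.UnavailableError:
		fmt.Println("unknown experiment:", e.ExperimentName)
	default:
		fmt.Println("unexpected error:", err)
	}
}
```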
-var currentExperiments = make(Set)
-
-// concludedExperiments are those which were available to activate in an earlier
-// version of Terraform but are no longer available, either because the feature
-// in question has been implemented or because the experiment failed and the
-// feature was abandoned. Each experiment maps to a message describing the
-// outcome, so we can give users feedback about what they might do in modules
-// using concluded experiments.
-//
-// After an experiment has been concluded for a whole major release span it can
-// be removed, since we expect users to perform upgrades one major release at
-// a time without skipping and thus they will see the concludedness error
-// message as they upgrade through a prior major version.
-//
-// Members of this map are registered in the init function above.
-var concludedExperiments = make(map[Experiment]string)
-
-func registerCurrentExperiment(exp Experiment) {
-	currentExperiments.Add(exp)
-}
-
-func registerConcludedExperiment(exp Experiment, message string) {
-	concludedExperiments[exp] = message
-}
diff --git a/vendor/github.com/hashicorp/terraform/experiments/set.go b/vendor/github.com/hashicorp/terraform/experiments/set.go
deleted file mode 100644
index 8247e212..00000000
--- a/vendor/github.com/hashicorp/terraform/experiments/set.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package experiments
-
-// Set is a collection of experiments where every experiment is either a member
-// or not.
-type Set map[Experiment]struct{}
-
-// NewSet constructs a new Set with the given experiments as its initial members.
-func NewSet(exps ...Experiment) Set {
-	ret := make(Set)
-	for _, exp := range exps {
-		ret.Add(exp)
-	}
-	return ret
-}
-
-// SetUnion constructs a new Set containing the members of all of the given
-// sets.
-func SetUnion(sets ...Set) Set {
-	ret := make(Set)
-	for _, set := range sets {
-		for exp := range set {
-			ret.Add(exp)
-		}
-	}
-	return ret
-}
-
-// Add inserts the given experiment into the set.
-//
-// If the given experiment is already present then this is a no-op.
-func (s Set) Add(exp Experiment) {
-	s[exp] = struct{}{}
-}
-
-// Remove takes the given experiment out of the set.
-//
-// If the given experiment is not already present then this is a no-op.
-func (s Set) Remove(exp Experiment) {
-	delete(s, exp)
-}
-
-// Has tests whether the given experiment is in the receiving set.
-func (s Set) Has(exp Experiment) bool {
-	_, ok := s[exp]
-	return ok
-}
diff --git a/vendor/github.com/hashicorp/terraform/experiments/testing.go b/vendor/github.com/hashicorp/terraform/experiments/testing.go
deleted file mode 100644
index 54ff2dfd..00000000
--- a/vendor/github.com/hashicorp/terraform/experiments/testing.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package experiments
-
-import (
-	"testing"
-)
-
-// OverrideForTesting temporarily overrides the global tables
-// of experiments in order to allow for a predictable set when unit testing
-// the experiments infrastructure code.
-//
-// The correct way to use this function is to defer a call to its result so
-// that the original tables can be restored at the conclusion of the calling
-// test:
-//
-//     defer experiments.OverrideForTesting(t, current, concluded)()
-//
-// This function modifies global variables that are normally fixed throughout
-// our execution, so this function must not be called from non-test code and
-// any test using it cannot safely run concurrently with other tests.
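[Reviewer note: the Set helpers deleted above compose as one would expect; a brief sketch with illustrative experiment names. Untyped string constants convert implicitly to the Experiment type.]

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/experiments"
)

func main() {
	a := experiments.NewSet("one", "two")
	b := experiments.NewSet("two", "three")

	all := experiments.SetUnion(a, b) // {"one", "two", "three"}
	all.Remove("one")

	fmt.Println(all.Has("two")) // true
	fmt.Println(all.Has("one")) // false
}
```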
-func OverrideForTesting(t *testing.T, current Set, concluded map[Experiment]string) func() { - // We're not currently using the given *testing.T in here, but we're - // requiring it anyway in case we might need it in future, and because - // it hopefully reinforces that only test code should be calling this. - - realCurrents := currentExperiments - realConcludeds := concludedExperiments - currentExperiments = current - concludedExperiments = concluded - return func() { - currentExperiments = realCurrents - concludedExperiments = realConcludeds - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go b/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go deleted file mode 100644 index 54899bc6..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go +++ /dev/null @@ -1,24 +0,0 @@ -package didyoumean - -import ( - "github.com/agext/levenshtein" -) - -// NameSuggestion tries to find a name from the given slice of suggested names -// that is close to the given name and returns it if found. If no suggestion -// is close enough, returns the empty string. -// -// The suggestions are tried in order, so earlier suggestions take precedence -// if the given string is similar to two or more suggestions. -// -// This function is intended to be used with a relatively-small number of -// suggestions. It's not optimized for hundreds or thousands of them. -func NameSuggestion(given string, suggestions []string) string { - for _, suggestion := range suggestions { - dist := levenshtein.Distance(given, suggestion, nil) - if dist < 3 { // threshold determined experimentally - return suggestion - } - } - return "" -} diff --git a/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go deleted file mode 100644 index 67be1df1..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go +++ /dev/null @@ -1,41 +0,0 @@ -package hilmapstructure - -import ( - "fmt" - "reflect" - - "github.com/mitchellh/mapstructure" -) - -var hilMapstructureDecodeHookEmptySlice []interface{} -var hilMapstructureDecodeHookStringSlice []string -var hilMapstructureDecodeHookEmptyMap map[string]interface{} - -// WeakDecode behaves in the same way as mapstructure.WeakDecode but has a -// DecodeHook which defeats the backward compatibility mode of mapstructure -// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This -// allows us to use WeakDecode (desirable), but not fail on empty lists. 
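[Reviewer note: a sketch of calling the WeakDecode helper whose body follows below. The target struct is illustrative; weak typing lets the string "3" coerce into the int field.]

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/hilmapstructure"
)

type config struct {
	Name  string
	Count int
}

func main() {
	raw := map[string]interface{}{
		"name":  "example",
		"count": "3", // weakly typed input: coerced to int
	}

	var c config
	if err := hilmapstructure.WeakDecode(raw, &c); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("%+v\n", c) // {Name:example Count:3}
}
```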
-func WeakDecode(m interface{}, rawVal interface{}) error {
-	config := &mapstructure.DecoderConfig{
-		DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) {
-			sliceType := reflect.TypeOf(hilMapstructureDecodeHookEmptySlice)
-			stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice)
-			mapType := reflect.TypeOf(hilMapstructureDecodeHookEmptyMap)
-
-			if (source == sliceType || source == stringSliceType) && target == mapType {
-				return nil, fmt.Errorf("Cannot convert a []interface{} into a map[string]interface{}")
-			}
-
-			return val, nil
-		},
-		WeaklyTypedInput: true,
-		Result:           rawVal,
-	}
-
-	decoder, err := mapstructure.NewDecoder(config)
-	if err != nil {
-		return err
-	}
-
-	return decoder.Decode(m)
-}
diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/indent.go b/vendor/github.com/hashicorp/terraform/helper/logging/indent.go
deleted file mode 100644
index e0da0d7c..00000000
--- a/vendor/github.com/hashicorp/terraform/helper/logging/indent.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package logging
-
-import (
-	"strings"
-)
-
-// Indent adds two spaces to the beginning of each line of the given string,
-// with the goal of making the log level filter understand it as a line
-// continuation rather than possibly as new log lines.
-func Indent(s string) string {
-	var b strings.Builder
-	for len(s) > 0 {
-		end := strings.IndexByte(s, '\n')
-		if end == -1 {
-			end = len(s) - 1
-		}
-		var l string
-		l, s = s[:end+1], s[end+1:]
-		b.WriteString("  ")
-		b.WriteString(l)
-	}
-	return b.String()
-}
diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/level.go b/vendor/github.com/hashicorp/terraform/helper/logging/level.go
deleted file mode 100644
index 0dc4dfe8..00000000
--- a/vendor/github.com/hashicorp/terraform/helper/logging/level.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package logging
-
-import (
-	"bytes"
-	"io"
-	"sync"
-)
-
-// LogLevel is a special string, conventionally written all in uppercase, that
-// can be used to mark a log line for filtering and to specify filtering
-// levels in the LevelFilter type.
-type LogLevel string
-
-// LevelFilter is an io.Writer that can be used with a logger that
-// will attempt to filter out log messages that aren't at least a certain
-// level.
-//
-// This filtering is HEURISTIC-BASED, and so will not be 100% reliable. The
-// assumptions it makes are:
-//
-// - Individual log messages are never split across multiple calls to the
-//   Write method.
-//
-// - Messages that carry levels are marked by a sequence starting with "[",
-//   then the level name string, and then "]". Any message without a sequence
-//   like this is an un-levelled message, and is not subject to filtering.
-//
-// - Each \n-delimited line in a write is a separate log message, unless a
-//   line starts with at least one space in which case it is interpreted
-//   as a continuation of the previous line.
-//
-// - If a log line starts with a non-whitespace character that isn't a digit
-//   then it's recognized as a degenerate continuation, because "real" log
-//   lines should start with a date/time and thus always have a leading
-//   digit. (This also cleans up after some situations where the assumption
-//   that messages arrive atomically isn't met, which is sadly sometimes
-//   true for longer messages that trip over some buffering behavior in
-//   panicwrap.)
-//
-// Because logging is a cross-cutting concern and not fully under the control
-// of Terraform itself, there will certainly be cases where the above
-// heuristics will fail. For example, it is likely that LevelFilter will
-// occasionally misinterpret a continuation line as a new message because the
-// code generating it doesn't know about our indentation convention.
-//
-// Our goal here is just to make a best effort to reduce the log volume,
-// accepting that the results will not be 100% correct.
-//
-// Logging calls within Terraform Core should follow the above conventions so
-// that the log output is broadly correct, however.
-//
-// Once the filter is in use somewhere, it is not safe to modify
-// the structure.
-type LevelFilter struct {
-	// Levels is the list of log levels, in increasing order of
-	// severity. Example might be: {"DEBUG", "WARN", "ERROR"}.
-	Levels []LogLevel
-
-	// MinLevel is the minimum level allowed through
-	MinLevel LogLevel
-
-	// The underlying io.Writer where log messages that pass the filter
-	// will be sent.
-	Writer io.Writer
-
-	badLevels map[LogLevel]struct{}
-	show      bool
-	once      sync.Once
-}
-
-// Check reports whether a given line would be included by the level
-// filter.
-func (f *LevelFilter) Check(line []byte) bool {
-	f.once.Do(f.init)
-
-	// Check for a log level
-	var level LogLevel
-	x := bytes.IndexByte(line, '[')
-	if x >= 0 {
-		y := bytes.IndexByte(line[x:], ']')
-		if y >= 0 {
-			level = LogLevel(line[x+1 : x+y])
-		}
-	}
-
-	//return level == ""
-
-	_, ok := f.badLevels[level]
-	return !ok
-}
-
-// Write is a specialized implementation of io.Writer suitable for being
-// the output of a logger from the "log" package.
-//
-// This Writer implementation assumes that it will only receive byte slices
-// containing one or more entire lines of log output, each one terminated by
-// a newline. This is compatible with the behavior of the "log" package
-// directly, and is also tolerant of intermediaries that might buffer multiple
-// separate writes together, as long as no individual log line is ever
-// split into multiple slices.
-//
-// Behavior is undefined if any log line is split across multiple writes or
-// written without a trailing '\n' delimiter.
-func (f *LevelFilter) Write(p []byte) (n int, err error) {
-	for len(p) > 0 {
-		// Split at the first \n, inclusive
-		idx := bytes.IndexByte(p, '\n')
-		if idx == -1 {
-			// Invalid, undelimited write. We'll tolerate it assuming that
-			// our assumptions are being violated, but the results may be
-			// non-ideal.
-			idx = len(p) - 1
-			break
-		}
-		var l []byte
-		l, p = p[:idx+1], p[idx+1:]
-		// Lines starting with characters other than decimal digits (including
-		// whitespace) are assumed to be continuation lines. This is an
-		// imprecise heuristic, but experimentally it seems to generate
-		// "good enough" results from Terraform Core's own logging. Its mileage
-		// may vary with output from other systems.
-		if l[0] >= '0' && l[0] <= '9' {
-			f.show = f.Check(l)
-		}
-		if f.show {
-			_, err = f.Writer.Write(l)
-			if err != nil {
-				// Technically it's not correct to say we've written the whole
-				// buffer, but for our purposes here it's good enough as we're
-				// only implementing io.Writer enough to satisfy logging
-				// use-cases.
-				return len(p), err
-			}
-		}
-	}
-
-	// We always behave as if we wrote the whole of the buffer, even if
-	// we actually skipped some lines. We're only implementing io.Writer
-	// enough to satisfy logging use-cases.
-	return len(p), nil
-}
-
-// SetMinLevel is used to update the minimum log level
-func (f *LevelFilter) SetMinLevel(min LogLevel) {
-	f.MinLevel = min
-	f.init()
-}
-
-func (f *LevelFilter) init() {
-	badLevels := make(map[LogLevel]struct{})
-	for _, level := range f.Levels {
-		if level == f.MinLevel {
-			break
-		}
-		badLevels[level] = struct{}{}
-	}
-	f.badLevels = badLevels
-	f.show = true
-}
diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
deleted file mode 100644
index 75627cf0..00000000
--- a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package logging
-
-import (
-	"io"
-	"io/ioutil"
-	"log"
-	"os"
-	"strings"
-	"syscall"
-)
-
-// These are the environment variables that determine if we log, and if
-// we log whether or not the log should go to a file.
-const (
-	EnvLog     = "TF_LOG"      // Set to True
-	EnvLogFile = "TF_LOG_PATH" // Set to a file
-)
-
-// ValidLevels are the log level names that Terraform recognizes.
-var ValidLevels = []LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"}
-
-// LogOutput determines where we should send logs (if anywhere) and the log level.
-func LogOutput() (logOutput io.Writer, err error) {
-	logOutput = ioutil.Discard
-
-	logLevel := CurrentLogLevel()
-	if logLevel == "" {
-		return
-	}
-
-	logOutput = os.Stderr
-	if logPath := os.Getenv(EnvLogFile); logPath != "" {
-		var err error
-		logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	if logLevel == "TRACE" {
-		// Just pass through logs directly then, without any level filtering at all.
-		return logOutput, nil
-	}
-
-	// Otherwise we'll use our level filter, which is a heuristic-based
-	// best effort thing that is not totally reliable but helps to reduce
-	// the volume of logs in some cases.
-	logOutput = &LevelFilter{
-		Levels:   ValidLevels,
-		MinLevel: LogLevel(logLevel),
-		Writer:   logOutput,
-	}
-
-	return logOutput, nil
-}
-
-// SetOutput checks for a log destination with LogOutput, and calls
-// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses
-// ioutil.Discard. Any error from LogOutput is fatal.
-func SetOutput() {
-	out, err := LogOutput()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	if out == nil {
-		out = ioutil.Discard
-	}
-
-	log.SetOutput(out)
-}
-
-// CurrentLogLevel returns the current log level string based on the environment vars
-func CurrentLogLevel() string {
-	envLevel := os.Getenv(EnvLog)
-	if envLevel == "" {
-		return ""
-	}
-
-	logLevel := "TRACE"
-	if isValidLogLevel(envLevel) {
-		// allow following for better ux: info, Info or INFO
-		logLevel = strings.ToUpper(envLevel)
-	} else {
-		log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE.
Valid levels are: %+v", - envLevel, ValidLevels) - } - if logLevel != "TRACE" { - log.Printf("[WARN] Log levels other than TRACE are currently unreliable, and are supported only for backward compatibility.\n Use TF_LOG=TRACE to see Terraform's internal logs.\n ----") - } - - return logLevel -} - -// IsDebugOrHigher returns whether or not the current log level is debug or trace -func IsDebugOrHigher() bool { - level := string(CurrentLogLevel()) - return level == "DEBUG" || level == "TRACE" -} - -func isValidLogLevel(level string) bool { - for _, l := range ValidLevels { - if strings.ToUpper(level) == string(l) { - return true - } - } - - return false -} diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform/helper/logging/transport.go deleted file mode 100644 index bddabe64..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/logging/transport.go +++ /dev/null @@ -1,70 +0,0 @@ -package logging - -import ( - "bytes" - "encoding/json" - "log" - "net/http" - "net/http/httputil" - "strings" -) - -type transport struct { - name string - transport http.RoundTripper -} - -func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { - if IsDebugOrHigher() { - reqData, err := httputil.DumpRequestOut(req, true) - if err == nil { - log.Printf("[DEBUG] "+logReqMsg, t.name, prettyPrintJsonLines(reqData)) - } else { - log.Printf("[ERROR] %s API Request error: %#v", t.name, err) - } - } - - resp, err := t.transport.RoundTrip(req) - if err != nil { - return resp, err - } - - if IsDebugOrHigher() { - respData, err := httputil.DumpResponse(resp, true) - if err == nil { - log.Printf("[DEBUG] "+logRespMsg, t.name, prettyPrintJsonLines(respData)) - } else { - log.Printf("[ERROR] %s API Response error: %#v", t.name, err) - } - } - - return resp, nil -} - -func NewTransport(name string, t http.RoundTripper) *transport { - return &transport{name, t} -} - -// prettyPrintJsonLines iterates through a []byte line-by-line, -// transforming any lines that are complete json into pretty-printed json. -func prettyPrintJsonLines(b []byte) string { - parts := strings.Split(string(b), "\n") - for i, p := range parts { - if b := []byte(p); json.Valid(b) { - var out bytes.Buffer - json.Indent(&out, b, "", " ") - parts[i] = out.String() - } - } - return strings.Join(parts, "\n") -} - -const logReqMsg = `%s API Request Details: ----[ REQUEST ]--------------------------------------- -%s ------------------------------------------------------` - -const logRespMsg = `%s API Response Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` diff --git a/vendor/github.com/hashicorp/terraform/httpclient/client.go b/vendor/github.com/hashicorp/terraform/httpclient/client.go deleted file mode 100644 index bb06beb4..00000000 --- a/vendor/github.com/hashicorp/terraform/httpclient/client.go +++ /dev/null @@ -1,18 +0,0 @@ -package httpclient - -import ( - "net/http" - - cleanhttp "github.com/hashicorp/go-cleanhttp" -) - -// New returns the DefaultPooledClient from the cleanhttp -// package that will also send a Terraform User-Agent string. 
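[Reviewer note: stepping back to the logging transport removed just above, a sketch of wiring it into an *http.Client so request/response dumps appear at DEBUG level. The URL is illustrative.]

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/hashicorp/terraform/helper/logging"
)

func main() {
	// Route log output through the TF_LOG-controlled filter first.
	logging.SetOutput()

	client := &http.Client{
		// "registry" names the subsystem in the dumped request/response logs.
		Transport: logging.NewTransport("registry", http.DefaultTransport),
	}
	resp, err := client.Get("https://registry.terraform.io/.well-known/terraform.json")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	resp.Body.Close()
}
```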
-func New() *http.Client { - cli := cleanhttp.DefaultPooledClient() - cli.Transport = &userAgentRoundTripper{ - userAgent: UserAgentString(), - inner: cli.Transport, - } - return cli -} diff --git a/vendor/github.com/hashicorp/terraform/httpclient/useragent.go b/vendor/github.com/hashicorp/terraform/httpclient/useragent.go deleted file mode 100644 index 536703c6..00000000 --- a/vendor/github.com/hashicorp/terraform/httpclient/useragent.go +++ /dev/null @@ -1,56 +0,0 @@ -package httpclient - -import ( - "fmt" - "log" - "net/http" - "os" - "strings" - - "github.com/hashicorp/terraform/version" -) - -const userAgentFormat = "Terraform/%s" -const uaEnvVar = "TF_APPEND_USER_AGENT" - -// Deprecated: Use UserAgent(version) instead -func UserAgentString() string { - ua := fmt.Sprintf(userAgentFormat, version.Version) - - if add := os.Getenv(uaEnvVar); add != "" { - add = strings.TrimSpace(add) - if len(add) > 0 { - ua += " " + add - log.Printf("[DEBUG] Using modified User-Agent: %s", ua) - } - } - - return ua -} - -type userAgentRoundTripper struct { - inner http.RoundTripper - userAgent string -} - -func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if _, ok := req.Header["User-Agent"]; !ok { - req.Header.Set("User-Agent", rt.userAgent) - } - log.Printf("[TRACE] HTTP client %s request to %s", req.Method, req.URL.String()) - return rt.inner.RoundTrip(req) -} - -func TerraformUserAgent(version string) string { - ua := fmt.Sprintf("HashiCorp Terraform/%s (+https://www.terraform.io)", version) - - if add := os.Getenv(uaEnvVar); add != "" { - add = strings.TrimSpace(add) - if len(add) > 0 { - ua += " " + add - log.Printf("[DEBUG] Using modified User-Agent: %s", ua) - } - } - - return ua -} diff --git a/vendor/github.com/hashicorp/terraform/instances/expander.go b/vendor/github.com/hashicorp/terraform/instances/expander.go deleted file mode 100644 index 1a2f1dd2..00000000 --- a/vendor/github.com/hashicorp/terraform/instances/expander.go +++ /dev/null @@ -1,362 +0,0 @@ -package instances - -import ( - "fmt" - "sort" - "sync" - - "github.com/hashicorp/terraform/addrs" - "github.com/zclconf/go-cty/cty" -) - -// Expander instances serve as a coordination point for gathering object -// repetition values (count and for_each in configuration) and then later -// making use of them to fully enumerate all of the instances of an object. -// -// The two repeatable object types in Terraform are modules and resources. -// Because resources belong to modules and modules can nest inside other -// modules, module expansion in particular has a recursive effect that can -// cause deep objects to expand exponentially. Expander assumes that all -// instances of a module have the same static objects inside, and that they -// differ only in the repetition count for some of those objects. -// -// Expander is a synchronized object whose methods can be safely called -// from concurrent threads of execution. However, it does expect a certain -// sequence of operations which is normally obtained by the caller traversing -// a dependency graph: each object must have its repetition mode set exactly -// once, and this must be done before any calls that depend on the repetition -// mode. In other words, the count or for_each expression value for a module -// must be provided before any object nested directly or indirectly inside -// that module can be expanded. If this ordering is violated, the methods -// will panic to enforce internal consistency. 
-// -// The Expand* methods of Expander only work directly with modules and with -// resources. Addresses for other objects that nest within modules but -// do not themselves support repetition can be obtained by calling ExpandModule -// with the containing module path and then producing one absolute instance -// address per module instance address returned. -type Expander struct { - mu sync.RWMutex - exps *expanderModule -} - -// NewExpander initializes and returns a new Expander, empty and ready to use. -func NewExpander() *Expander { - return &Expander{ - exps: newExpanderModule(), - } -} - -// SetModuleSingle records that the given module call inside the given parent -// module does not use any repetition arguments and is therefore a singleton. -func (e *Expander) SetModuleSingle(parentAddr addrs.ModuleInstance, callAddr addrs.ModuleCall) { - e.setModuleExpansion(parentAddr, callAddr, expansionSingleVal) -} - -// SetModuleCount records that the given module call inside the given parent -// module instance uses the "count" repetition argument, with the given value. -func (e *Expander) SetModuleCount(parentAddr addrs.ModuleInstance, callAddr addrs.ModuleCall, count int) { - e.setModuleExpansion(parentAddr, callAddr, expansionCount(count)) -} - -// SetModuleForEach records that the given module call inside the given parent -// module instance uses the "for_each" repetition argument, with the given -// map value. -// -// In the configuration language the for_each argument can also accept a set. -// It's the caller's responsibility to convert that into an identity map before -// calling this method. -func (e *Expander) SetModuleForEach(parentAddr addrs.ModuleInstance, callAddr addrs.ModuleCall, mapping map[string]cty.Value) { - e.setModuleExpansion(parentAddr, callAddr, expansionForEach(mapping)) -} - -// SetResourceSingle records that the given resource inside the given module -// does not use any repetition arguments and is therefore a singleton. -func (e *Expander) SetResourceSingle(moduleAddr addrs.ModuleInstance, resourceAddr addrs.Resource) { - e.setResourceExpansion(moduleAddr, resourceAddr, expansionSingleVal) -} - -// SetResourceCount records that the given resource inside the given module -// uses the "count" repetition argument, with the given value. -func (e *Expander) SetResourceCount(moduleAddr addrs.ModuleInstance, resourceAddr addrs.Resource, count int) { - e.setResourceExpansion(moduleAddr, resourceAddr, expansionCount(count)) -} - -// SetResourceForEach records that the given resource inside the given module -// uses the "for_each" repetition argument, with the given map value. -// -// In the configuration language the for_each argument can also accept a set. -// It's the caller's responsibility to convert that into an identity map before -// calling this method. -func (e *Expander) SetResourceForEach(moduleAddr addrs.ModuleInstance, resourceAddr addrs.Resource, mapping map[string]cty.Value) { - e.setResourceExpansion(moduleAddr, resourceAddr, expansionForEach(mapping)) -} - -// ExpandModule finds the exhaustive set of module instances resulting from -// the expansion of the given module and all of its ancestor modules. -// -// All of the modules on the path to the identified module must already have -// had their expansion registered using one of the SetModule* methods before -// calling, or this method will panic. -func (e *Expander) ExpandModule(addr addrs.Module) []addrs.ModuleInstance { - if len(addr) == 0 { - // Root module is always a singleton. 
- return singletonRootModule - } - - e.mu.RLock() - defer e.mu.RUnlock() - - // We're going to be dynamically growing ModuleInstance addresses, so - // we'll preallocate some space to do it so that for typical shallow - // module trees we won't need to reallocate this. - // (moduleInstances does plenty of allocations itself, so the benefit of - // pre-allocating this is marginal but it's not hard to do.) - parentAddr := make(addrs.ModuleInstance, 0, 4) - ret := e.exps.moduleInstances(addr, parentAddr) - sort.SliceStable(ret, func(i, j int) bool { - return ret[i].Less(ret[j]) - }) - return ret -} - -// ExpandModuleResource finds the exhaustive set of resource instances resulting from -// the expansion of the given resource and all of its containing modules. -// -// All of the modules on the path to the identified resource and the resource -// itself must already have had their expansion registered using one of the -// SetModule*/SetResource* methods before calling, or this method will panic. -func (e *Expander) ExpandModuleResource(moduleAddr addrs.Module, resourceAddr addrs.Resource) []addrs.AbsResourceInstance { - e.mu.RLock() - defer e.mu.RUnlock() - - // We're going to be dynamically growing ModuleInstance addresses, so - // we'll preallocate some space to do it so that for typical shallow - // module trees we won't need to reallocate this. - // (moduleInstances does plenty of allocations itself, so the benefit of - // pre-allocating this is marginal but it's not hard to do.) - moduleInstanceAddr := make(addrs.ModuleInstance, 0, 4) - ret := e.exps.moduleResourceInstances(moduleAddr, resourceAddr, moduleInstanceAddr) - sort.SliceStable(ret, func(i, j int) bool { - return ret[i].Less(ret[j]) - }) - return ret -} - -// ExpandResource finds the set of resource instances resulting from -// the expansion of the given resource within its module instance. -// -// All of the modules on the path to the identified resource and the resource -// itself must already have had their expansion registered using one of the -// SetModule*/SetResource* methods before calling, or this method will panic. -func (e *Expander) ExpandResource(resourceAddr addrs.AbsResource) []addrs.AbsResourceInstance { - e.mu.RLock() - defer e.mu.RUnlock() - - moduleInstanceAddr := make(addrs.ModuleInstance, 0, 4) - ret := e.exps.resourceInstances(resourceAddr.Module, resourceAddr.Resource, moduleInstanceAddr) - sort.SliceStable(ret, func(i, j int) bool { - return ret[i].Less(ret[j]) - }) - return ret -} - -// GetModuleInstanceRepetitionData returns an object describing the values -// that should be available for each.key, each.value, and count.index within -// the call block for the given module instance. -func (e *Expander) GetModuleInstanceRepetitionData(addr addrs.ModuleInstance) RepetitionData { - if len(addr) == 0 { - // The root module is always a singleton, so it has no repetition data. - return RepetitionData{} - } - - e.mu.RLock() - defer e.mu.RUnlock() - - parentMod := e.findModule(addr[:len(addr)-1]) - lastStep := addr[len(addr)-1] - exp, ok := parentMod.moduleCalls[addrs.ModuleCall{Name: lastStep.Name}] - if !ok { - panic(fmt.Sprintf("no expansion has been registered for %s", addr)) - } - return exp.repetitionData(lastStep.InstanceKey) -} - -// GetResourceInstanceRepetitionData returns an object describing the values -// that should be available for each.key, each.value, and count.index within -// the definition block for the given resource instance. 
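[Reviewer note: a sketch of the top-down register-then-expand contract the comments above describe. The module call address is illustrative, and addrs.RootModule/RootModuleInstance are assumed from the addrs package.]

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
	"github.com/hashicorp/terraform/instances"
)

func main() {
	exp := instances.NewExpander()

	// Register repetition top-down: the module call first...
	call := addrs.ModuleCall{Name: "network"}
	exp.SetModuleCount(addrs.RootModuleInstance, call, 2)

	// ...then expand to enumerate module.network[0] and module.network[1].
	for _, inst := range exp.ExpandModule(addrs.RootModule.Child("network")) {
		fmt.Println(inst)
	}
}
```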
-func (e *Expander) GetResourceInstanceRepetitionData(addr addrs.AbsResourceInstance) RepetitionData { - e.mu.RLock() - defer e.mu.RUnlock() - - parentMod := e.findModule(addr.Module) - exp, ok := parentMod.resources[addr.Resource.Resource] - if !ok { - panic(fmt.Sprintf("no expansion has been registered for %s", addr.ContainingResource())) - } - return exp.repetitionData(addr.Resource.Key) -} - -func (e *Expander) findModule(moduleInstAddr addrs.ModuleInstance) *expanderModule { - // We expect that all of the modules on the path to our module instance - // should already have expansions registered. - mod := e.exps - for i, step := range moduleInstAddr { - next, ok := mod.childInstances[step] - if !ok { - // Top-down ordering of registration is part of the contract of - // Expander, so this is always indicative of a bug in the caller. - panic(fmt.Sprintf("no expansion has been registered for ancestor module %s", moduleInstAddr[:i+1])) - } - mod = next - } - return mod -} - -func (e *Expander) setModuleExpansion(parentAddr addrs.ModuleInstance, callAddr addrs.ModuleCall, exp expansion) { - e.mu.Lock() - defer e.mu.Unlock() - - mod := e.findModule(parentAddr) - if _, exists := mod.moduleCalls[callAddr]; exists { - panic(fmt.Sprintf("expansion already registered for %s", parentAddr.Child(callAddr.Name, addrs.NoKey))) - } - // We'll also pre-register the child instances so that later calls can - // populate them as the caller traverses the configuration tree. - for _, key := range exp.instanceKeys() { - step := addrs.ModuleInstanceStep{Name: callAddr.Name, InstanceKey: key} - mod.childInstances[step] = newExpanderModule() - } - mod.moduleCalls[callAddr] = exp -} - -func (e *Expander) setResourceExpansion(parentAddr addrs.ModuleInstance, resourceAddr addrs.Resource, exp expansion) { - e.mu.Lock() - defer e.mu.Unlock() - - mod := e.findModule(parentAddr) - if _, exists := mod.resources[resourceAddr]; exists { - panic(fmt.Sprintf("expansion already registered for %s", resourceAddr.Absolute(parentAddr))) - } - mod.resources[resourceAddr] = exp -} - -type expanderModule struct { - moduleCalls map[addrs.ModuleCall]expansion - resources map[addrs.Resource]expansion - childInstances map[addrs.ModuleInstanceStep]*expanderModule -} - -func newExpanderModule() *expanderModule { - return &expanderModule{ - moduleCalls: make(map[addrs.ModuleCall]expansion), - resources: make(map[addrs.Resource]expansion), - childInstances: make(map[addrs.ModuleInstanceStep]*expanderModule), - } -} - -var singletonRootModule = []addrs.ModuleInstance{addrs.RootModuleInstance} - -func (m *expanderModule) moduleInstances(addr addrs.Module, parentAddr addrs.ModuleInstance) []addrs.ModuleInstance { - callName := addr[0] - exp, ok := m.moduleCalls[addrs.ModuleCall{Name: callName}] - if !ok { - // This is a bug in the caller, because it should always register - // expansions for an object and all of its ancestors before requesting - // expansion of it. - panic(fmt.Sprintf("no expansion has been registered for %s", parentAddr.Child(callName, addrs.NoKey))) - } - - var ret []addrs.ModuleInstance - - // If there's more than one step remaining then we need to traverse deeper. - if len(addr) > 1 { - for step, inst := range m.childInstances { - if step.Name != callName { - continue - } - instAddr := append(parentAddr, step) - ret = append(ret, inst.moduleInstances(addr[1:], instAddr)...) - } - return ret - } - - // Otherwise, we'll use the expansion from the final step to produce - // a sequence of addresses under this prefix. 
- for _, k := range exp.instanceKeys() { - // We're reusing the buffer under parentAddr as we recurse through - // the structure, so we need to copy it here to produce a final - // immutable slice to return. - full := make(addrs.ModuleInstance, 0, len(parentAddr)+1) - full = append(full, parentAddr...) - full = full.Child(callName, k) - ret = append(ret, full) - } - return ret -} - -func (m *expanderModule) moduleResourceInstances(moduleAddr addrs.Module, resourceAddr addrs.Resource, parentAddr addrs.ModuleInstance) []addrs.AbsResourceInstance { - if len(moduleAddr) > 0 { - var ret []addrs.AbsResourceInstance - // We need to traverse through the module levels first, so we can - // then iterate resource expansions in the context of each module - // path leading to them. - callName := moduleAddr[0] - if _, ok := m.moduleCalls[addrs.ModuleCall{Name: callName}]; !ok { - // This is a bug in the caller, because it should always register - // expansions for an object and all of its ancestors before requesting - // expansion of it. - panic(fmt.Sprintf("no expansion has been registered for %s", parentAddr.Child(callName, addrs.NoKey))) - } - - for step, inst := range m.childInstances { - if step.Name != callName { - continue - } - moduleInstAddr := append(parentAddr, step) - ret = append(ret, inst.moduleResourceInstances(moduleAddr[1:], resourceAddr, moduleInstAddr)...) - } - return ret - } - - return m.onlyResourceInstances(resourceAddr, parentAddr) -} - -func (m *expanderModule) resourceInstances(moduleAddr addrs.ModuleInstance, resourceAddr addrs.Resource, parentAddr addrs.ModuleInstance) []addrs.AbsResourceInstance { - if len(moduleAddr) > 0 { - // We need to traverse through the module levels first, using only the - // module instances for our specific resource, as the resource may not - // yet be expanded in all module instances. - step := moduleAddr[0] - callName := step.Name - if _, ok := m.moduleCalls[addrs.ModuleCall{Name: callName}]; !ok { - // This is a bug in the caller, because it should always register - // expansions for an object and all of its ancestors before requesting - // expansion of it. - panic(fmt.Sprintf("no expansion has been registered for %s", parentAddr.Child(callName, addrs.NoKey))) - } - - inst := m.childInstances[step] - moduleInstAddr := append(parentAddr, step) - return inst.resourceInstances(moduleAddr[1:], resourceAddr, moduleInstAddr) - } - return m.onlyResourceInstances(resourceAddr, parentAddr) -} - -func (m *expanderModule) onlyResourceInstances(resourceAddr addrs.Resource, parentAddr addrs.ModuleInstance) []addrs.AbsResourceInstance { - var ret []addrs.AbsResourceInstance - exp, ok := m.resources[resourceAddr] - if !ok { - panic(fmt.Sprintf("no expansion has been registered for %s", resourceAddr.Absolute(parentAddr))) - } - - for _, k := range exp.instanceKeys() { - // We're reusing the buffer under parentAddr as we recurse through - // the structure, so we need to copy it here to produce a final - // immutable slice to return. 
- moduleAddr := make(addrs.ModuleInstance, len(parentAddr)) - copy(moduleAddr, parentAddr) - ret = append(ret, resourceAddr.Instance(k).Absolute(moduleAddr)) - } - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/instances/expansion_mode.go b/vendor/github.com/hashicorp/terraform/instances/expansion_mode.go deleted file mode 100644 index be339343..00000000 --- a/vendor/github.com/hashicorp/terraform/instances/expansion_mode.go +++ /dev/null @@ -1,85 +0,0 @@ -package instances - -import ( - "fmt" - "sort" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" -) - -// expansion is an internal interface used to represent the different -// ways expansion can operate depending on how repetition is configured for -// an object. -type expansion interface { - instanceKeys() []addrs.InstanceKey - repetitionData(addrs.InstanceKey) RepetitionData -} - -// expansionSingle is the expansion corresponding to no repetition arguments -// at all, producing a single object with no key. -// -// expansionSingleVal is the only valid value of this type. -type expansionSingle uintptr - -var singleKeys = []addrs.InstanceKey{addrs.NoKey} -var expansionSingleVal expansionSingle - -func (e expansionSingle) instanceKeys() []addrs.InstanceKey { - return singleKeys -} - -func (e expansionSingle) repetitionData(key addrs.InstanceKey) RepetitionData { - if key != addrs.NoKey { - panic("cannot use instance key with non-repeating object") - } - return RepetitionData{} -} - -// expansionCount is the expansion corresponding to the "count" argument. -type expansionCount int - -func (e expansionCount) instanceKeys() []addrs.InstanceKey { - ret := make([]addrs.InstanceKey, int(e)) - for i := range ret { - ret[i] = addrs.IntKey(i) - } - return ret -} - -func (e expansionCount) repetitionData(key addrs.InstanceKey) RepetitionData { - i := int(key.(addrs.IntKey)) - if i < 0 || i >= int(e) { - panic(fmt.Sprintf("instance key %d out of range for count %d", i, e)) - } - return RepetitionData{ - CountIndex: cty.NumberIntVal(int64(i)), - } -} - -// expansionForEach is the expansion corresponding to the "for_each" argument. -type expansionForEach map[string]cty.Value - -func (e expansionForEach) instanceKeys() []addrs.InstanceKey { - ret := make([]addrs.InstanceKey, 0, len(e)) - for k := range e { - ret = append(ret, addrs.StringKey(k)) - } - sort.Slice(ret, func(i, j int) bool { - return ret[i].(addrs.StringKey) < ret[j].(addrs.StringKey) - }) - return ret -} - -func (e expansionForEach) repetitionData(key addrs.InstanceKey) RepetitionData { - k := string(key.(addrs.StringKey)) - v, ok := e[k] - if !ok { - panic(fmt.Sprintf("instance key %q does not match any instance", k)) - } - return RepetitionData{ - EachKey: cty.StringVal(k), - EachValue: v, - } -} diff --git a/vendor/github.com/hashicorp/terraform/instances/instance_key_data.go b/vendor/github.com/hashicorp/terraform/instances/instance_key_data.go deleted file mode 100644 index 9ada5253..00000000 --- a/vendor/github.com/hashicorp/terraform/instances/instance_key_data.go +++ /dev/null @@ -1,28 +0,0 @@ -package instances - -import ( - "github.com/zclconf/go-cty/cty" -) - -// RepetitionData represents the values available to identify individual -// repetitions of a particular object. -// -// This corresponds to the each.key, each.value, and count.index symbols in -// the configuration language. 
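[Reviewer note: the Set*ForEach methods earlier in this file require callers to convert a for_each set into an identity map first; a sketch of that conversion with go-cty. identityMap is a hypothetical helper, not part of this package.]

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// identityMap converts a cty set of strings into the map[string]cty.Value
// shape expected by SetModuleForEach and SetResourceForEach.
func identityMap(set cty.Value) map[string]cty.Value {
	ret := make(map[string]cty.Value)
	for _, v := range set.AsValueSlice() {
		ret[v.AsString()] = v
	}
	return ret
}

func main() {
	s := cty.SetVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
	for k := range identityMap(s) {
		fmt.Println(k) // prints "a" and "b" in unspecified order
	}
}
```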
-type RepetitionData struct {
-	// CountIndex is the value for count.index, or cty.NilVal if evaluating
-	// in a context where the "count" argument is not active.
-	//
-	// For correct operation, this should always be of type cty.Number if not
-	// nil.
-	CountIndex cty.Value
-
-	// EachKey and EachValue are the values for each.key and each.value
-	// respectively, or cty.NilVal if evaluating in a context where the
-	// "for_each" argument is not active. These must either both be set
-	// or neither set.
-	//
-	// For correct operation, EachKey must always be either of type cty.String
-	// or cty.Number if not nil.
-	EachKey, EachValue cty.Value
-}
diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/doc.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/doc.go
deleted file mode 100644
index a39aa1dd..00000000
--- a/vendor/github.com/hashicorp/terraform/internal/getproviders/doc.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Package getproviders is the lowest-level provider automatic installation
-// functionality. It can answer questions about what providers and provider
-// versions are available in a registry, and it can retrieve the URL for
-// the distribution archive for a specific version of a specific provider
-// targeting a particular platform.
-//
-// This package is not responsible for choosing the best version to install
-// from a set of available versions, or for any signature verification of the
-// archives it fetches. Callers will use this package in conjunction with other
-// logic elsewhere in order to construct a full provider installer.
-package getproviders
diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/errors.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/errors.go
deleted file mode 100644
index c5f5191e..00000000
--- a/vendor/github.com/hashicorp/terraform/internal/getproviders/errors.go
+++ /dev/null
@@ -1,204 +0,0 @@
-package getproviders
-
-import (
-	"fmt"
-
-	svchost "github.com/hashicorp/terraform-svchost"
-	"github.com/hashicorp/terraform/addrs"
-)
-
-// ErrHostNoProviders is an error type used to indicate that a hostname given
-// in a provider address does not support the provider registry protocol.
-type ErrHostNoProviders struct {
-	Hostname svchost.Hostname
-
-	// HasOtherVersion is set to true if the discovery process detected
-	// declarations of services named "providers" whose version numbers did not
-	// match any version supported by the current version of Terraform.
-	//
-	// If this is set, it's helpful to hint to the user in an error message
-	// that the provider host may be expecting an older or a newer version
-	// of Terraform, rather than that it isn't a provider registry host at all.
-	HasOtherVersion bool
-}
-
-func (err ErrHostNoProviders) Error() string {
-	switch {
-	case err.HasOtherVersion:
-		return fmt.Sprintf("host %s does not support the provider registry protocol required by this Terraform version, but may be compatible with a different Terraform version", err.Hostname.ForDisplay())
-	default:
-		return fmt.Sprintf("host %s does not offer a Terraform provider registry", err.Hostname.ForDisplay())
-	}
-}
-
-// ErrHostUnreachable is an error type used to indicate that a hostname
-// given in a provider address did not resolve in DNS, did not respond to an
-// HTTPS request for service discovery, or otherwise failed to correctly speak
-// the service discovery protocol.
-type ErrHostUnreachable struct { - Hostname svchost.Hostname - Wrapped error -} - -func (err ErrHostUnreachable) Error() string { - return fmt.Sprintf("could not connect to %s: %s", err.Hostname.ForDisplay(), err.Wrapped.Error()) -} - -// Unwrap returns the underlying error that occurred when trying to reach the -// indicated host. -func (err ErrHostUnreachable) Unwrap() error { - return err.Wrapped -} - -// ErrUnauthorized is an error type used to indicate that a hostname -// given in a provider address returned a "401 Unauthorized" or "403 Forbidden" -// error response when we tried to access it. -type ErrUnauthorized struct { - Hostname svchost.Hostname - - // HaveCredentials is true when the request that failed included some - // credentials, and thus it seems that those credentials were invalid. - // Conversely, HaveCredentials is false if the request did not include - // credentials at all, in which case it seems that credentials must be - // provided. - HaveCredentials bool -} - -func (err ErrUnauthorized) Error() string { - switch { - case err.HaveCredentials: - return fmt.Sprintf("host %s rejected the given authentication credentials", err.Hostname) - default: - return fmt.Sprintf("host %s requires authentication credentials", err.Hostname) - } -} - -// ErrProviderNotFound is an error type used to indicate that requested provider -// was not found in the source(s) included in the Description field. This can be -// used to produce user-friendly error messages. -type ErrProviderNotFound struct { - Provider addrs.Provider - Sources []string -} - -func (err ErrProviderNotFound) Error() string { - return fmt.Sprintf( - "provider %s was not found in any of the search locations", - err.Provider, - ) -} - -// ErrRegistryProviderNotKnown is an error type used to indicate that the hostname -// given in a provider address does appear to be a provider registry but that -// registry does not know about the given provider namespace or type. -// -// A caller serving requests from an end-user should recognize this error type -// and use it to produce user-friendly hints for common errors such as failing -// to specify an explicit source for a provider not in the default namespace -// (one not under registry.terraform.io/hashicorp/). The default error message -// for this type is a direct description of the problem with no such hints, -// because we expect that the caller will have better context to decide what -// hints are appropriate, e.g. by looking at the configuration given by the -// user. -type ErrRegistryProviderNotKnown struct { - Provider addrs.Provider -} - -func (err ErrRegistryProviderNotKnown) Error() string { - return fmt.Sprintf( - "provider registry %s does not have a provider named %s", - err.Provider.Hostname.ForDisplay(), - err.Provider, - ) -} - -// ErrPlatformNotSupported is an error type used to indicate that a particular -// version of a provider isn't available for a particular target platform. -// -// This is returned when DownloadLocation encounters a 404 Not Found response -// from the underlying registry, because it presumes that a caller will only -// ask for the DownloadLocation for a version it already found the existence -// of via AvailableVersions. 
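[Reviewer note: these error types are intended for type-based dispatch by callers that want friendlier messages; a sketch follows. getproviders is an internal package, so this compiles only within the terraform module, and the messages are illustrative.]

```go
package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/terraform/internal/getproviders"
)

// describe turns the package's error types into user-oriented messages.
func describe(err error) string {
	switch e := err.(type) {
	case getproviders.ErrRegistryProviderNotKnown:
		return e.Error() + "; non-HashiCorp providers need an explicit source address"
	case getproviders.ErrProviderNotFound:
		return fmt.Sprintf("%s (searched: %s)", e.Error(), strings.Join(e.Sources, ", "))
	case getproviders.ErrUnauthorized:
		if e.HaveCredentials {
			return e.Error() + "; the stored credentials may be invalid"
		}
		return e.Error() + "; credentials must be configured for this host"
	default:
		return err.Error()
	}
}

func main() {
	fmt.Println(describe(fmt.Errorf("example error")))
}
```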
-type ErrPlatformNotSupported struct {
-	Provider addrs.Provider
-	Version  Version
-	Platform Platform
-}
-
-func (err ErrPlatformNotSupported) Error() string {
-	return fmt.Sprintf(
-		"provider %s %s is not available for %s",
-		err.Provider,
-		err.Version,
-		err.Platform,
-	)
-}
-
-// ErrProtocolNotSupported is an error type used to indicate that a particular
-// version of a provider is not supported by the current version of Terraform.
-//
-// Specifically, this is returned when the version's plugin protocol is not supported.
-//
-// When available, the error will include a suggested version that can be displayed to
-// the user. Otherwise it will return UnspecifiedVersion.
-type ErrProtocolNotSupported struct {
-	Provider   addrs.Provider
-	Version    Version
-	Suggestion Version
-}
-
-func (err ErrProtocolNotSupported) Error() string {
-	return fmt.Sprintf(
-		"provider %s %s is not supported by this version of terraform",
-		err.Provider,
-		err.Version,
-	)
-}
-
-// ErrQueryFailed is an error type used to indicate that the hostname given
-// in a provider address does appear to be a provider registry but that when
-// we queried it for metadata for the given provider the server returned an
-// unexpected error.
-//
-// This is used for any error responses other than "Not Found", which would
-// indicate the absence of a provider and is thus reported using
-// ErrProviderNotKnown instead.
-type ErrQueryFailed struct {
-	Provider addrs.Provider
-	Wrapped  error
-}
-
-func (err ErrQueryFailed) Error() string {
-	return fmt.Sprintf(
-		"could not query provider registry for %s: %s",
-		err.Provider.String(),
-		err.Wrapped.Error(),
-	)
-}
-
-// Unwrap returns the underlying error that occurred when trying to reach the
-// indicated host.
-func (err ErrQueryFailed) Unwrap() error {
-	return err.Wrapped
-}
-
-// ErrIsNotExist returns true if and only if the given error is one of the
-// errors from this package that represents an affirmative response that a
-// requested object does not exist.
-//
-// This is as opposed to errors indicating that the source is unavailable
-// or misconfigured in some way, where we therefore cannot say for certain
-// whether the requested object exists.
-//
-// If a caller needs to take a special action based on something not existing,
-// such as falling back on some other source, use this function rather than
-// direct type assertions so that the set of possible "not exist" errors can
-// grow in future.
-func ErrIsNotExist(err error) bool {
-	switch err.(type) {
-	case ErrProviderNotFound, ErrRegistryProviderNotKnown, ErrPlatformNotSupported:
-		return true
-	default:
-		return false
-	}
-}
diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/filesystem_mirror_source.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/filesystem_mirror_source.go
deleted file mode 100644
index 801116e6..00000000
--- a/vendor/github.com/hashicorp/terraform/internal/getproviders/filesystem_mirror_source.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package getproviders
-
-import (
-	"github.com/hashicorp/terraform/addrs"
-)
-
-// FilesystemMirrorSource is a source that reads providers and their metadata
-// from a directory prefix in the local filesystem.
-type FilesystemMirrorSource struct {
-	baseDir string
-
-	// allPackages caches the result of scanning the baseDir for all available
-	// packages on the first call that needs package availability information,
-	// to avoid re-scanning the filesystem on subsequent operations.
- allPackages map[addrs.Provider]PackageMetaList -} - -var _ Source = (*FilesystemMirrorSource)(nil) - -// NewFilesystemMirrorSource constructs and returns a new filesystem-based -// mirror source with the given base directory. -func NewFilesystemMirrorSource(baseDir string) *FilesystemMirrorSource { - return &FilesystemMirrorSource{ - baseDir: baseDir, - } -} - -// AvailableVersions scans the directory structure under the source's base -// directory for locally-mirrored packages for the given provider, returning -// a list of version numbers for the providers it found. -func (s *FilesystemMirrorSource) AvailableVersions(provider addrs.Provider) (VersionList, error) { - // s.allPackages is populated if scanAllVersions succeeds - err := s.scanAllVersions() - if err != nil { - return nil, err - } - - // There might be multiple packages for a given version in the filesystem, - // but the contract here is to return distinct versions so we'll dedupe - // them first, then sort them, and then return them. - versionsMap := make(map[Version]struct{}) - for _, m := range s.allPackages[provider] { - versionsMap[m.Version] = struct{}{} - } - ret := make(VersionList, 0, len(versionsMap)) - for v := range versionsMap { - ret = append(ret, v) - } - ret.Sort() - return ret, nil -} - -// PackageMeta checks to see if the source's base directory contains a -// local copy of the distribution package for the given provider version on -// the given target, and returns the metadata about it if so. -func (s *FilesystemMirrorSource) PackageMeta(provider addrs.Provider, version Version, target Platform) (PackageMeta, error) { - // s.allPackages is populated if scanAllVersions succeeds - err := s.scanAllVersions() - if err != nil { - return PackageMeta{}, err - } - - relevantPkgs := s.allPackages[provider].FilterProviderPlatformExactVersion(provider, target, version) - if len(relevantPkgs) == 0 { - // This is the local equivalent of a "404 Not Found" when retrieving - // a particular version from a registry or network mirror. Because - // the caller should've selected a version already found by - // AvailableVersions, the only discriminator that should fail here - // is the target platform, and so our error result assumes that, - // causing the caller to return an error like "This provider version is - // not compatible with aros_riscv". - return PackageMeta{}, ErrPlatformNotSupported{ - Provider: provider, - Version: version, - Platform: target, - } - } - - // It's possible that there could be multiple copies of the same package - // available in the filesystem, if e.g. there's both a packed and an - // unpacked variant. For now we assume that the decision between them - // is arbitrary and just take the first one in the result. - return relevantPkgs[0], nil -} - -// AllAvailablePackages scans the directory structure under the source's base -// directory for locally-mirrored packages for all providers, returning a map -// of the discovered packages with the fully-qualified provider names as -// keys. -// -// This is not an operation generally supported by all Source implementations, -// but the filesystem implementation offers it because we also use the -// filesystem mirror source directly to scan our auto-install plugin directory -// and in other automatic discovery situations. 
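As a usage sketch: since the package is internal to Terraform, only code inside the repository can call it, but such a caller might enumerate locally mirrored versions like this. The base directory and the addrs.NewDefaultProvider helper are my assumptions about the surrounding codebase:

    func listMirroredVersions() {
    	src := NewFilesystemMirrorSource("/usr/local/share/terraform/plugins")
    	provider := addrs.NewDefaultProvider("aws") // registry.terraform.io/hashicorp/aws
    	versions, err := src.AvailableVersions(provider)
    	if err != nil {
    		log.Fatal(err) // assumes the standard library "log" package is imported
    	}
    	for _, v := range versions {
    		fmt.Printf("mirrored: %s v%s\n", provider, v)
    	}
    }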
-func (s *FilesystemMirrorSource) AllAvailablePackages() (map[addrs.Provider]PackageMetaList, error) { - // s.allPackages is populated if scanAllVersions succeeds - err := s.scanAllVersions() - return s.allPackages, err -} - -func (s *FilesystemMirrorSource) scanAllVersions() error { - if s.allPackages != nil { - // we're distinguishing nil-ness from emptiness here so we can - // recognize when we've scanned the directory without errors, even - // if we found nothing during the scan. - return nil - } - - ret, err := SearchLocalDirectory(s.baseDir) - if err != nil { - return err - } - - // As noted above, we use an explicit empty map so we can distinguish a - // successful-but-empty result from a failure on future calls, so we'll - // make sure that's what we have before we assign it here. - if ret == nil { - ret = make(map[addrs.Provider]PackageMetaList) - } - s.allPackages = ret - return nil -} - -func (s *FilesystemMirrorSource) ForDisplay(provider addrs.Provider) string { - return s.baseDir -} diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/filesystem_search.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/filesystem_search.go deleted file mode 100644 index ed5a9e19..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/getproviders/filesystem_search.go +++ /dev/null @@ -1,258 +0,0 @@ -package getproviders - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strings" - - svchost "github.com/hashicorp/terraform-svchost" - - "github.com/hashicorp/terraform/addrs" -) - -// SearchLocalDirectory performs an immediate, one-off scan of the given base -// directory for provider plugins using the directory structure defined for -// FilesystemMirrorSource. -// -// This is separated to allow other callers, such as the provider plugin cache -// management in the "internal/providercache" package, to use the same -// directory structure conventions. -func SearchLocalDirectory(baseDir string) (map[addrs.Provider]PackageMetaList, error) { - ret := make(map[addrs.Provider]PackageMetaList) - err := filepath.Walk(baseDir, func(fullPath string, info os.FileInfo, err error) error { - if err != nil { - return fmt.Errorf("cannot search %s: %s", fullPath, err) - } - - // There are two valid directory structures that we support here... - // Unpacked: registry.terraform.io/hashicorp/aws/2.0.0/linux_amd64 (a directory) - // Packed: registry.terraform.io/hashicorp/aws/terraform-provider-aws_2.0.0_linux_amd64.zip (a file) - // - // Both of these give us enough information to identify the package - // metadata. - fsPath, err := filepath.Rel(baseDir, fullPath) - if err != nil { - // This should never happen because the filepath.Walk contract is - // for the paths to include the base path. - log.Printf("[TRACE] getproviders.SearchLocalDirectory: ignoring malformed path %q during walk: %s", fullPath, err) - return nil - } - relPath := filepath.ToSlash(fsPath) - parts := strings.Split(relPath, "/") - - if len(parts) < 3 { - // Likely a prefix of a valid path, so we'll ignore it and visit - // the full valid path on a later call. - return nil - } - - hostnameGiven := parts[0] - namespace := parts[1] - typeName := parts[2] - - // validate each part - // The legacy provider namespace is a special case. 
-		if namespace != addrs.LegacyProviderNamespace {
-			_, err = addrs.ParseProviderPart(namespace)
-			if err != nil {
-				log.Printf("[WARN] local provider path %q contains invalid namespace %q; ignoring", fullPath, namespace)
-				return nil
-			}
-		}
-
-		_, err = addrs.ParseProviderPart(typeName)
-		if err != nil {
-			log.Printf("[WARN] local provider path %q contains invalid type %q; ignoring", fullPath, typeName)
-			return nil
-		}
-
-		hostname, err := svchost.ForComparison(hostnameGiven)
-		if err != nil {
-			log.Printf("[WARN] local provider path %q contains invalid hostname %q; ignoring", fullPath, hostnameGiven)
-			return nil
-		}
-		var providerAddr addrs.Provider
-		if namespace == addrs.LegacyProviderNamespace {
-			if hostname != addrs.DefaultRegistryHost {
-				log.Printf("[WARN] local provider path %q indicates a legacy provider not on the default registry host; ignoring", fullPath)
-				return nil
-			}
-			providerAddr = addrs.NewLegacyProvider(typeName)
-		} else {
-			providerAddr = addrs.NewProvider(hostname, namespace, typeName)
-		}
-
-		// The "info" passed to our function is an Lstat result, so it might
-		// be referring to a symbolic link. We'll do a full "Stat" on it
-		// now to make sure we're making tests against the real underlying
-		// filesystem object below.
-		info, err = os.Stat(fullPath)
-		if err != nil {
-			return fmt.Errorf("failed to read metadata about %s: %s", fullPath, err)
-		}
-
-		switch len(parts) {
-		case 5: // Might be unpacked layout
-			if !info.IsDir() {
-				return nil // unpacked layout requires a directory
-			}
-
-			versionStr := parts[3]
-			version, err := ParseVersion(versionStr)
-			if err != nil {
-				log.Printf("[WARN] ignoring local provider path %q with invalid version %q: %s", fullPath, versionStr, err)
-				return nil
-			}
-
-			platformStr := parts[4]
-			platform, err := ParsePlatform(platformStr)
-			if err != nil {
-				log.Printf("[WARN] ignoring local provider path %q with invalid platform %q: %s", fullPath, platformStr, err)
-				return nil
-			}
-
-			log.Printf("[TRACE] getproviders.SearchLocalDirectory: found %s v%s for %s at %s", providerAddr, version, platform, fullPath)
-
-			meta := PackageMeta{
-				Provider: providerAddr,
-				Version:  version,
-
-				// FIXME: How do we populate this?
-				ProtocolVersions: nil,
-				TargetPlatform:   platform,
-
-				// Because this is already unpacked, the filename is synthetic
-				// based on the standard naming scheme.
-				Filename: fmt.Sprintf("terraform-provider-%s_%s_%s.zip", providerAddr.Type, version, platform),
-				Location: PackageLocalDir(fullPath),
-
-				// FIXME: What about the SHA256Sum field? As currently specified
-				// it's a hash of the zip file, but this thing is already
-				// unpacked and so we don't have the zip file to hash.
-			}
-			ret[providerAddr] = append(ret[providerAddr], meta)
-
-		case 4: // Might be packed layout
-			if info.IsDir() {
-				return nil // packed layout requires a file
-			}
-
-			filename := filepath.Base(fsPath)
-			// the filename components are matched case-insensitively, and
-			// the normalized form of them is in lowercase so we'll convert
-			// to lowercase for comparison here. (This normalizes only for case,
-			// because that is the primary constraint affecting compatibility
-			// between filesystem implementations on different platforms;
-			// filenames are expected to be pre-normalized and valid in other
-			// regards.)
-			normFilename := strings.ToLower(filename)
-
-			// In the packed layout, the version number and target platform
-			// are derived from the package filename, but only if the
-			// filename has the expected prefix identifying it as a package
-			// for the provider in question, and the suffix identifying it
-			// as a zip file.
-			prefix := "terraform-provider-" + providerAddr.Type + "_"
-			const suffix = ".zip"
-			if !strings.HasPrefix(normFilename, prefix) {
-				log.Printf("[WARN] ignoring file %q as possible package for %s: filename lacks expected prefix %q", fsPath, providerAddr, prefix)
-				return nil
-			}
-			if !strings.HasSuffix(normFilename, suffix) {
-				log.Printf("[WARN] ignoring file %q as possible package for %s: filename lacks expected suffix %q", fsPath, providerAddr, suffix)
-				return nil
-			}
-
-			// Extract the version and target part of the filename, which
-			// will look like "2.1.0_linux_amd64"
-			infoSlice := normFilename[len(prefix) : len(normFilename)-len(suffix)]
-			infoParts := strings.Split(infoSlice, "_")
-			if len(infoParts) < 3 {
-				log.Printf("[WARN] ignoring file %q as possible package for %s: filename does not include version number, target OS, and target architecture", fsPath, providerAddr)
-				return nil
-			}
-
-			versionStr := infoParts[0]
-			version, err := ParseVersion(versionStr)
-			if err != nil {
-				log.Printf("[WARN] ignoring local provider path %q with invalid version %q: %s", fullPath, versionStr, err)
-				return nil
-			}
-
-			// We'll reassemble this back into a single string just so we can
-			// easily re-use our existing parser and its normalization rules.
-			platformStr := infoParts[1] + "_" + infoParts[2]
-			platform, err := ParsePlatform(platformStr)
-			if err != nil {
-				log.Printf("[WARN] ignoring local provider path %q with invalid platform %q: %s", fullPath, platformStr, err)
-				return nil
-			}
-
-			log.Printf("[TRACE] getproviders.SearchLocalDirectory: found %s v%s for %s at %s", providerAddr, version, platform, fullPath)
-
-			meta := PackageMeta{
-				Provider: providerAddr,
-				Version:  version,
-
-				// FIXME: How do we populate this?
-				ProtocolVersions: nil,
-				TargetPlatform:   platform,
-
-				// Because this package is still packed, the filename here is
-				// the real archive name rather than a synthetic one.
-				Filename: normFilename, // normalized filename, because this field says what it _should_ be called, not what it _is_ called
-				Location: PackageLocalArchive(fullPath), // non-normalized here, because this is the actual physical location
-
-				// TODO: Also populate the SHA256Sum field. Skipping that
-				// for now because our initial uses of this result --
-				// scanning already-installed providers in local directories,
-				// rather than explicit filesystem mirrors -- doesn't do
-				// any hash verification anyway, and this is consistent with
-				// the FIXME in the unpacked case above even though technically
-				// we _could_ populate SHA256Sum here right now.
-			}
-			ret[providerAddr] = append(ret[providerAddr], meta)
-
-		}
-
-		return nil
-	})
-	if err != nil {
-		return nil, err
-	}
-	// Sort the results to be deterministic (aside from semver build metadata)
-	// and consistent with ordering from other functions.
-	for _, l := range ret {
-		l.Sort()
-	}
-	return ret, nil
-}
-
-// UnpackedDirectoryPathForPackage is similar to
-// PackageMeta.UnpackedDirectoryPath but makes its decision based on
-// individually-passed provider address, version, and target platform so that
-// it can be used by callers outside this package that may have other
-// types that represent package identifiers.
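For orientation, here is what the two path helpers defined below would produce for a typical provider. The base directory is a placeholder, and the sketch assumes the MustParseVersion helper and the addrs.NewDefaultProvider constructor exist elsewhere in the codebase:

    dir := UnpackedDirectoryPathForPackage(
    	"/usr/local/share/terraform/plugins",
    	addrs.NewDefaultProvider("aws"),
    	MustParseVersion("2.0.0"),
    	Platform{OS: "linux", Arch: "amd64"},
    )
    // dir: /usr/local/share/terraform/plugins/registry.terraform.io/hashicorp/aws/2.0.0/linux_amd64

    file := PackedFilePathForPackage(
    	"/usr/local/share/terraform/plugins",
    	addrs.NewDefaultProvider("aws"),
    	MustParseVersion("2.0.0"),
    	Platform{OS: "linux", Arch: "amd64"},
    )
    // file: .../registry.terraform.io/hashicorp/aws/terraform-provider-aws_2.0.0_linux_amd64.zip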
-func UnpackedDirectoryPathForPackage(baseDir string, provider addrs.Provider, version Version, platform Platform) string { - return filepath.ToSlash(filepath.Join( - baseDir, - provider.Hostname.ForDisplay(), provider.Namespace, provider.Type, - version.String(), - platform.String(), - )) -} - -// PackedFilePathForPackage is similar to -// PackageMeta.PackedFilePath but makes its decision based on -// individually-passed provider address, version, and target platform so that -// it can be used by callers outside this package that may have other -// types that represent package identifiers. -func PackedFilePathForPackage(baseDir string, provider addrs.Provider, version Version, platform Platform) string { - return filepath.ToSlash(filepath.Join( - baseDir, - provider.Hostname.ForDisplay(), provider.Namespace, provider.Type, - fmt.Sprintf("terraform-provider-%s_%s_%s.zip", provider.Type, version.String(), platform.String()), - )) -} diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/hash.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/hash.go deleted file mode 100644 index 50463847..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/getproviders/hash.go +++ /dev/null @@ -1,157 +0,0 @@ -package getproviders - -import ( - "fmt" - "path/filepath" - "strings" - - "golang.org/x/mod/sumdb/dirhash" -) - -// PackageHash computes a hash of the contents of the package at the given -// location, using whichever hash algorithm is the current default. -// -// Currently, this method returns version 1 hashes as produced by the -// function PackageHashV1, but this function may switch to other versions in -// later releases. Call PackageHashV1 directly if you specifically need a V1 -// hash. -// -// PackageHash can be used only with the two local package location types -// PackageLocalDir and PackageLocalArchive, because it needs to access the -// contents of the indicated package in order to compute the hash. If given -// a non-local location this function will always return an error. -func PackageHash(loc PackageLocation) (string, error) { - return PackageHashV1(loc) -} - -// PackageMatchesHash returns true if the package at the given location matches -// the given hash, or false otherwise. -// -// If it cannot read from the given location, or if the given hash is in an -// unsupported format, PackageMatchesHash returns an error. -// -// There is currently only one hash format, as implemented by HashV1. However, -// if others are introduced in future PackageMatchesHash may accept multiple -// formats, and may generate errors for any formats that become obsolete. -// -// PackageMatchesHash can be used only with the two local package location types -// PackageLocalDir and PackageLocalArchive, because it needs to access the -// contents of the indicated package in order to compute the hash. If given -// a non-local location this function will always return an error. -func PackageMatchesHash(loc PackageLocation, want string) (bool, error) { - switch { - case strings.HasPrefix(want, "h1"): - got, err := PackageHashV1(loc) - if err != nil { - return false, err - } - return got == want, nil - default: - return false, fmt.Errorf("unsupported hash format (this may require a newer version of Terraform)") - } -} - -// PackageHashV1 computes a hash of the contents of the package at the given -// location using hash algorithm 1. -// -// The hash covers the paths to files in the directory and the contents of -// those files. 
-// those files. It does not cover other metadata about the files, such as
-// permissions.
-//
-// This function is named "PackageHashV1" in anticipation of other hashing
-// algorithms being added in a backward-compatible way in future. The result
-// from PackageHashV1 always begins with the prefix "h1:" so that callers can
-// distinguish the results of potentially multiple different hash algorithms in
-// future.
-//
-// PackageHashV1 can be used only with the two local package location types
-// PackageLocalDir and PackageLocalArchive, because it needs to access the
-// contents of the indicated package in order to compute the hash. If given
-// a non-local location this function will always return an error.
-func PackageHashV1(loc PackageLocation) (string, error) {
-	// Our HashV1 is really just the Go Modules hash version 1, which is
-	// sufficient for our needs and already well-used for identity of
-	// Go Modules distribution packages. It is also blocked from incompatible
-	// changes by being used in a wide array of go.sum files already.
-	//
-	// In particular, it also supports computing an equivalent hash from
-	// an unpacked zip file, which is not important for Terraform workflow
-	// today but is likely to become so in future if we adopt a top-level
-	// lockfile mechanism that is intended to be checked in to version control,
-	// rather than just a transient lock for a particular local cache directory.
-	// (In that case we'd need to check hashes of _packed_ packages, too.)
-	//
-	// Internally, dirhash.Hash1 produces a string containing a sequence of
-	// newline-separated path+filehash pairs for all of the files in the
-	// directory, and then finally produces a hash of that string to return.
-	// In both cases, the hash algorithm is SHA256.
-
-	switch loc := loc.(type) {
-
-	case PackageLocalDir:
-		// We'll first dereference a possible symlink at our PackageDir location,
-		// as would be created if this package were linked in from another cache.
-		packageDir, err := filepath.EvalSymlinks(string(loc))
-		if err != nil {
-			return "", err
-		}
-
-		return dirhash.HashDir(packageDir, "", dirhash.Hash1)
-
-	case PackageLocalArchive:
-		archivePath, err := filepath.EvalSymlinks(string(loc))
-		if err != nil {
-			return "", err
-		}
-
-		return dirhash.HashZip(archivePath, dirhash.Hash1)
-
-	default:
-		return "", fmt.Errorf("cannot hash package at %s", loc.String())
-	}
-}
-
-// Hash computes a hash of the contents of the package at the location
-// associated with the receiver, using whichever hash algorithm is the current
-// default.
-//
-// This method will change to use new hash versions as they are introduced
-// in future. If you need a specific hash version, call the method for that
-// version directly instead, such as HashV1.
-//
-// Hash can be used only with the two local package location types
-// PackageLocalDir and PackageLocalArchive, because it needs to access the
-// contents of the indicated package in order to compute the hash. If given
-// a non-local location this function will always return an error.
-func (m PackageMeta) Hash() (string, error) {
-	return PackageHash(m.Location)
-}
-
-// MatchesHash returns true if the package at the location associated with
-// the receiver matches the given hash, or false otherwise.
-//
-// If it cannot read from the given location, or if the given hash is in an
-// unsupported format, MatchesHash returns an error.
-//
-// MatchesHash can be used only with the two local package location types
-// PackageLocalDir and PackageLocalArchive, because it needs to access the
-// contents of the indicated package in order to compute the hash. If given
-// a non-local location this function will always return an error.
-func (m PackageMeta) MatchesHash(want string) (bool, error) {
-	return PackageMatchesHash(m.Location, want)
-}
-
-// HashV1 computes a hash of the contents of the package at the location
-// associated with the receiver using hash algorithm 1.
-//
-// The hash covers the paths to files in the directory and the contents of
-// those files. It does not cover other metadata about the files, such as
-// permissions.
-//
-// HashV1 can be used only with the two local package location types
-// PackageLocalDir and PackageLocalArchive, because it needs to access the
-// contents of the indicated package in order to compute the hash. If given
-// a non-local location this function will always return an error.
-func (m PackageMeta) HashV1() (string, error) {
-	return PackageHashV1(m.Location)
-}
diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/http_mirror_source.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/http_mirror_source.go
deleted file mode 100644
index d00323c3..00000000
--- a/vendor/github.com/hashicorp/terraform/internal/getproviders/http_mirror_source.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package getproviders
-
-import (
-	"fmt"
-	"net/url"
-
-	"github.com/hashicorp/terraform/addrs"
-)
-
-// HTTPMirrorSource is a source that reads provider metadata from a provider
-// mirror that is accessible over the HTTP provider mirror protocol.
-type HTTPMirrorSource struct {
-	baseURL *url.URL
-}
-
-var _ Source = (*HTTPMirrorSource)(nil)
-
-// NewHTTPMirrorSource constructs and returns a new network mirror source with
-// the given base URL. The relative URL offsets defined by the HTTP mirror
-// protocol will be resolved relative to the given URL.
-func NewHTTPMirrorSource(baseURL *url.URL) *HTTPMirrorSource {
-	return &HTTPMirrorSource{
-		baseURL: baseURL,
-	}
-}
-
-// AvailableVersions retrieves the available versions for the given provider
-// from the object's underlying HTTP mirror service.
-func (s *HTTPMirrorSource) AvailableVersions(provider addrs.Provider) (VersionList, error) {
-	return nil, fmt.Errorf("Network-based provider mirrors are not supported in this version of Terraform")
-}
-
-// PackageMeta retrieves metadata for the requested provider package
-// from the object's underlying HTTP mirror service.
-func (s *HTTPMirrorSource) PackageMeta(provider addrs.Provider, version Version, target Platform) (PackageMeta, error) {
-	return PackageMeta{}, fmt.Errorf("Network-based provider mirrors are not supported in this version of Terraform")
-}
-
-// ForDisplay returns a string description of the source for user-facing output.
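Since both lookup methods above are hard-coded to fail in this version, the only meaningful interaction with this stub is constructing it and observing the error; a quick sketch, with a placeholder mirror URL:

    base, err := url.Parse("https://mirror.example.com/providers/")
    if err != nil {
    	log.Fatal(err) // assumes the standard library "log" package is imported
    }
    src := NewHTTPMirrorSource(base)
    _, err = src.AvailableVersions(addrs.NewDefaultProvider("aws"))
    // err is always non-nil here: network mirrors are unsupported in this version.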
-func (s *HTTPMirrorSource) ForDisplay(provider addrs.Provider) string { - return "Network-based provider mirrors are not supported in this version of Terraform" -} diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/legacy_lookup.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/legacy_lookup.go deleted file mode 100644 index 71d9b15c..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/getproviders/legacy_lookup.go +++ /dev/null @@ -1,126 +0,0 @@ -package getproviders - -import ( - "fmt" - - svchost "github.com/hashicorp/terraform-svchost" - - "github.com/hashicorp/terraform/addrs" -) - -// LookupLegacyProvider attempts to resolve a legacy provider address (whose -// registry host and namespace are implied, rather than explicit) into a -// fully-qualified provider address, by asking the main Terraform registry -// to resolve it. -// -// If the given address is not a legacy provider address then it will just be -// returned verbatim without making any outgoing requests. -// -// Legacy provider lookup is possible only if the given source is either a -// *RegistrySource directly or if it is a MultiSource containing a -// *RegistrySource whose selector matching patterns include the -// public registry hostname registry.terraform.io. -// -// This is a backward-compatibility mechanism for compatibility with existing -// configurations that don't include explicit provider source addresses. New -// configurations should not rely on it, and this fallback mechanism is -// likely to be removed altogether in a future Terraform version. -func LookupLegacyProvider(addr addrs.Provider, source Source) (addrs.Provider, error) { - if addr.Namespace != "-" { - return addr, nil - } - if addr.Hostname != defaultRegistryHost { // condition above assures namespace is also "-" - // Legacy providers must always belong to the default registry host. - return addrs.Provider{}, fmt.Errorf("invalid provider type %q: legacy provider addresses must always belong to %s", addr, defaultRegistryHost) - } - - // Now we need to derive a suitable *RegistrySource from the given source, - // either directly or indirectly. This will not be possible if the user - // has configured Terraform to disable direct installation from - // registry.terraform.io; in that case, fully-qualified provider addresses - // are always required. - regSource := findLegacyProviderLookupSource(addr.Hostname, source) - if regSource == nil { - // This error message is assuming that the given Source was produced - // based on the CLI configuration, which isn't necessarily true but - // is true in all cases where this error message will ultimately be - // presented to an end-user, so good enough for now. - return addrs.Provider{}, fmt.Errorf("unqualified provider type %q cannot be resolved because direct installation from %s is disabled in the CLI configuration; declare an explicit provider namespace for this provider", addr.Type, addr.Hostname) - } - - defaultNamespace, err := regSource.LookupLegacyProviderNamespace(addr.Hostname, addr.Type) - if err != nil { - return addrs.Provider{}, err - } - - return addrs.Provider{ - Hostname: addr.Hostname, - Namespace: defaultNamespace, - Type: addr.Type, - }, nil -} - -// findLegacyProviderLookupSource tries to find a *RegistrySource that can talk -// to the given registry host in the given Source. It might be given directly, -// or it might be given indirectly via a MultiSource where the selector -// includes a wildcard for registry.terraform.io. 
-//
-// Returns nil if the given source does not have any configured way to talk
-// directly to the given host.
-//
-// If the given source contains multiple sources that can talk to the given
-// host directly, the first one in the sequence takes preference. In practice
-// it's pointless to have two direct installation sources that match the same
-// hostname anyway, so this shouldn't arise in normal use.
-func findLegacyProviderLookupSource(host svchost.Hostname, source Source) *RegistrySource {
-	switch source := source.(type) {
-
-	case *RegistrySource:
-		// Easy case: the source is a registry source directly, and so we'll
-		// just use it.
-		return source
-
-	case *MemoizeSource:
-		// Also easy: the source is a memoize wrapper, so defer to its
-		// underlying source.
-		return findLegacyProviderLookupSource(host, source.underlying)
-
-	case MultiSource:
-		// Trickier case: if it's a multisource then we need to scan over
-		// its selectors until we find one that is a *RegistrySource _and_
-		// that is configured to accept arbitrary providers from the
-		// given hostname.
-
-		// For our matching purposes we'll use an address that would not be
-		// valid as a real provider FQN and thus can only match a selector
-		// that has no filters at all or a selector that wildcards everything
-		// except the hostname, like "registry.terraform.io/*/*"
-		matchAddr := addrs.Provider{
-			Hostname: host,
-			// Other fields are intentionally left empty, to make this invalid
-			// as a specific provider address.
-		}
-
-		for _, selector := range source {
-			// If this source has suitable matching patterns to install from
-			// the given hostname then we'll recursively search inside it
-			// for *RegistrySource objects.
-			if selector.CanHandleProvider(matchAddr) {
-				ret := findLegacyProviderLookupSource(host, selector.Source)
-				if ret != nil {
-					return ret
-				}
-			}
-		}
-
-		// If we get here then there were no selectors that are both configured
-		// to handle modules from the given hostname and that are registry
-		// sources, so we fail.
-		return nil
-
-	default:
-		// This source cannot be and cannot contain a *RegistrySource, so
-		// we fail.
-		return nil
-	}
-}
diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/memoize_source.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/memoize_source.go
deleted file mode 100644
index 4513ea4a..00000000
--- a/vendor/github.com/hashicorp/terraform/internal/getproviders/memoize_source.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package getproviders
-
-import (
-	"sync"
-
-	"github.com/hashicorp/terraform/addrs"
-)
-
-// MemoizeSource is a Source that wraps another Source and remembers its
-// results so that they can be returned more quickly on future calls to the
-// same object.
-//
-// Each MemoizeSource maintains a cache of responses it has seen as part of its
-// body. All responses are retained for the remaining lifetime of the object.
-// Errors from the underlying source are also cached, and so subsequent calls
-// with the same arguments will always produce the same errors.
-//
-// A MemoizeSource can be called concurrently, with incoming requests processed
-// sequentially.
-type MemoizeSource struct { - underlying Source - availableVersions map[addrs.Provider]memoizeAvailableVersionsRet - packageMetas map[memoizePackageMetaCall]memoizePackageMetaRet - mu sync.Mutex -} - -type memoizeAvailableVersionsRet struct { - VersionList VersionList - Err error -} - -type memoizePackageMetaCall struct { - Provider addrs.Provider - Version Version - Target Platform -} - -type memoizePackageMetaRet struct { - PackageMeta PackageMeta - Err error -} - -var _ Source = (*MemoizeSource)(nil) - -// NewMemoizeSource constructs and returns a new MemoizeSource that wraps -// the given underlying source and memoizes its results. -func NewMemoizeSource(underlying Source) *MemoizeSource { - return &MemoizeSource{ - underlying: underlying, - availableVersions: make(map[addrs.Provider]memoizeAvailableVersionsRet), - packageMetas: make(map[memoizePackageMetaCall]memoizePackageMetaRet), - } -} - -// AvailableVersions requests the available versions from the underlying source -// and caches them before returning them, or on subsequent calls returns the -// result directly from the cache. -func (s *MemoizeSource) AvailableVersions(provider addrs.Provider) (VersionList, error) { - s.mu.Lock() - defer s.mu.Unlock() - - if existing, exists := s.availableVersions[provider]; exists { - return existing.VersionList, existing.Err - } - - ret, err := s.underlying.AvailableVersions(provider) - s.availableVersions[provider] = memoizeAvailableVersionsRet{ - VersionList: ret, - Err: err, - } - return ret, err -} - -// PackageMeta requests package metadata from the underlying source and caches -// the result before returning it, or on subsequent calls returns the result -// directly from the cache. -func (s *MemoizeSource) PackageMeta(provider addrs.Provider, version Version, target Platform) (PackageMeta, error) { - s.mu.Lock() - defer s.mu.Unlock() - - key := memoizePackageMetaCall{ - Provider: provider, - Version: version, - Target: target, - } - if existing, exists := s.packageMetas[key]; exists { - return existing.PackageMeta, existing.Err - } - - ret, err := s.underlying.PackageMeta(provider, version, target) - s.packageMetas[key] = memoizePackageMetaRet{ - PackageMeta: ret, - Err: err, - } - return ret, err -} - -func (s *MemoizeSource) ForDisplay(provider addrs.Provider) string { - return s.underlying.ForDisplay(provider) -} diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/mock_source.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/mock_source.go deleted file mode 100644 index 1672cc89..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/getproviders/mock_source.go +++ /dev/null @@ -1,204 +0,0 @@ -package getproviders - -import ( - "archive/zip" - "crypto/sha256" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/hashicorp/terraform/addrs" -) - -// MockSource is an in-memory-only, statically-configured source intended for -// use only in unit tests of other subsystems that consume provider sources. -// -// The MockSource also tracks calls to it in case a calling test wishes to -// assert that particular calls were made. -// -// This should not be used outside of unit test code. -type MockSource struct { - packages []PackageMeta - calls [][]interface{} -} - -var _ Source = (*MockSource)(nil) - -// NewMockSource creates and returns a MockSource with the given packages. 
-//
-// The given packages don't necessarily need to refer to objects that actually
-// exist on disk or over the network, unless the calling test is planning to
-// use (directly or indirectly) the results for further provider installation
-// actions.
-func NewMockSource(packages []PackageMeta) *MockSource {
-	return &MockSource{
-		packages: packages,
-	}
-}
-
-// AvailableVersions returns all of the versions of the given provider that
-// are available in the fixed set of packages that were passed to
-// NewMockSource when creating the receiving source.
-func (s *MockSource) AvailableVersions(provider addrs.Provider) (VersionList, error) {
-	s.calls = append(s.calls, []interface{}{"AvailableVersions", provider})
-	var ret VersionList
-	for _, pkg := range s.packages {
-		if pkg.Provider == provider {
-			ret = append(ret, pkg.Version)
-		}
-	}
-	if len(ret) == 0 {
-		// In this case, we'll behave like a registry that doesn't know about
-		// this provider at all, rather than just returning an empty result.
-		return nil, ErrRegistryProviderNotKnown{provider}
-	}
-	ret.Sort()
-	return ret, nil
-}
-
-// PackageMeta returns the first package from the list given to NewMockSource
-// when creating the receiver that has the given provider, version, and
-// target platform.
-//
-// If none of the packages match, it returns ErrPlatformNotSupported to
-// simulate the situation where a provider release isn't available for a
-// particular platform.
-//
-// Note that if the list of packages passed to NewMockSource contains more
-// than one with the same provider, version, and target this function will
-// always return the first one in the list, which may not match the behavior
-// of other sources in an equivalent situation because it's a degenerate case
-// with undefined results.
-func (s *MockSource) PackageMeta(provider addrs.Provider, version Version, target Platform) (PackageMeta, error) {
-	s.calls = append(s.calls, []interface{}{"PackageMeta", provider, version, target})
-
-	for _, pkg := range s.packages {
-		if pkg.Provider != provider {
-			continue
-		}
-		if pkg.Version != version {
-			// (We're using strict equality rather than precedence here,
-			// because this is an exact version specification. The caller
-			// should consider precedence when selecting a version in the
-			// AvailableVersions response, and pass the exact selected
-			// version here.)
-			continue
-		}
-		if pkg.TargetPlatform != target {
-			continue
-		}
-		return pkg, nil
-	}
-
-	// If we fall out here then nothing matched at all, so we'll treat that
-	// as "platform not supported" for consistency with RegistrySource.
-	return PackageMeta{}, ErrPlatformNotSupported{
-		Provider: provider,
-		Version:  version,
-		Platform: target,
-	}
-}
-
-// CallLog returns a list of calls to other methods of the receiver that have
-// been called since it was created, in case a calling test wishes to verify
-// a particular sequence of operations.
-//
-// The result is a slice of slices where the first element of each inner slice
-// is the name of the method that was called, and then any subsequent elements
-// are positional arguments passed to that method.
-//
-// Callers are forbidden from modifying any objects accessible via the returned
-// value.
-func (s *MockSource) CallLog() [][]interface{} {
-	return s.calls
-}
-
-// FakePackageMeta constructs and returns a PackageMeta that carries the given
-// metadata but has fake location information that is likely to fail if
-// attempting to install from it.
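A sketch of how a test might combine NewMockSource with the FakePackageMeta helper defined below; the version, protocol, and platform values are arbitrary, and MustParseVersion and addrs.NewDefaultProvider are assumed to exist elsewhere in the codebase:

    pkg := FakePackageMeta(
    	addrs.NewDefaultProvider("null"),
    	MustParseVersion("2.0.0"),
    	VersionList{MustParseVersion("5.0")}, // declared plugin protocol versions
    	Platform{OS: "linux", Arch: "amd64"},
    )
    src := NewMockSource([]PackageMeta{pkg})

    got, err := src.AvailableVersions(pkg.Provider)
    // got contains 2.0.0; asking for any other provider yields
    // ErrRegistryProviderNotKnown, mimicking a real registry.
    _, _ = got, err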
-func FakePackageMeta(provider addrs.Provider, version Version, protocols VersionList, target Platform) PackageMeta { - return PackageMeta{ - Provider: provider, - Version: version, - ProtocolVersions: protocols, - TargetPlatform: target, - - // Some fake but somewhat-realistic-looking other metadata. This - // points nowhere, so will fail if attempting to actually use it. - Filename: fmt.Sprintf("terraform-provider-%s_%s_%s.zip", provider.Type, version.String(), target.String()), - Location: PackageHTTPURL(fmt.Sprintf("https://fake.invalid/terraform-provider-%s_%s.zip", provider.Type, version.String())), - } -} - -// FakeInstallablePackageMeta constructs and returns a PackageMeta that points -// to a temporary archive file that could actually be installed in principle. -// -// Installing it will not produce a working provider though: just a fake file -// posing as an executable. -// -// It's the caller's responsibility to call the close callback returned -// alongside the result in order to clean up the temporary file. The caller -// should call the callback even if this function returns an error, because -// some error conditions leave a partially-created file on disk. -func FakeInstallablePackageMeta(provider addrs.Provider, version Version, protocols VersionList, target Platform) (PackageMeta, func(), error) { - f, err := ioutil.TempFile("", "terraform-getproviders-fake-package-") - if err != nil { - return PackageMeta{}, func() {}, err - } - - // After this point, all of our return paths should include this as the - // close callback. - close := func() { - f.Close() - os.Remove(f.Name()) - } - - execFilename := fmt.Sprintf("terraform-provider-%s_%s", provider.Type, version.String()) - if target.OS == "windows" { - // For a little more (technically unnecessary) realism... - execFilename += ".exe" - } - - zw := zip.NewWriter(f) - fw, err := zw.Create(execFilename) - if err != nil { - return PackageMeta{}, close, fmt.Errorf("failed to add %s to mock zip file: %s", execFilename, err) - } - fmt.Fprintf(fw, "This is a fake provider package for %s %s, not a real provider.\n", provider, version) - err = zw.Close() - if err != nil { - return PackageMeta{}, close, fmt.Errorf("failed to close the mock zip file: %s", err) - } - - // Compute the SHA256 checksum of the generated file, to allow package - // authentication code to be exercised. - f.Seek(0, io.SeekStart) - h := sha256.New() - io.Copy(h, f) - checksum := [32]byte{} - h.Sum(checksum[:0]) - - meta := PackageMeta{ - Provider: provider, - Version: version, - ProtocolVersions: protocols, - TargetPlatform: target, - - Location: PackageLocalArchive(f.Name()), - - // This is a fake filename that mimics what a real registry might - // indicate as a good filename for this package, in case some caller - // intends to use it to name a local copy of the temporary file. - // (At the time of writing, no caller actually does that, but who - // knows what the future holds?) 
- Filename: fmt.Sprintf("terraform-provider-%s_%s_%s.zip", provider.Type, version.String(), target.String()), - - Authentication: NewArchiveChecksumAuthentication(checksum), - } - return meta, close, nil -} - -func (s *MockSource) ForDisplay(provider addrs.Provider) string { - return "mock source" -} diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/multi_source.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/multi_source.go deleted file mode 100644 index 1d25938c..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/getproviders/multi_source.go +++ /dev/null @@ -1,251 +0,0 @@ -package getproviders - -import ( - "fmt" - "strings" - - svchost "github.com/hashicorp/terraform-svchost" - - "github.com/hashicorp/terraform/addrs" -) - -// MultiSource is a Source that wraps a series of other sources and combines -// their sets of available providers and provider versions. -// -// A MultiSource consists of a sequence of selectors that each specify an -// underlying source to query and a set of matching patterns to decide which -// providers can be retrieved from which sources. If multiple selectors find -// a given provider version then the earliest one in the sequence takes -// priority for deciding the package metadata for the provider. -// -// For underlying sources that make network requests, consider wrapping each -// one in a MemoizeSource so that availability information retrieved in -// AvailableVersions can be reused in PackageMeta. -type MultiSource []MultiSourceSelector - -var _ Source = MultiSource(nil) - -// AvailableVersions retrieves all of the versions of the given provider -// that are available across all of the underlying selectors, while respecting -// each selector's matching patterns. -func (s MultiSource) AvailableVersions(provider addrs.Provider) (VersionList, error) { - if len(s) == 0 { // Easy case: there can be no available versions - return nil, nil - } - - // We will return the union of all versions reported by the nested - // sources that have matching patterns that accept the given provider. - vs := make(map[Version]struct{}) - var registryError bool - for _, selector := range s { - if !selector.CanHandleProvider(provider) { - continue // doesn't match the given patterns - } - thisSourceVersions, err := selector.Source.AvailableVersions(provider) - switch err.(type) { - case nil: - // okay - case ErrRegistryProviderNotKnown: - registryError = true - continue // ignore, then - case ErrProviderNotFound: - continue // ignore, then - default: - return nil, err - } - for _, v := range thisSourceVersions { - vs[v] = struct{}{} - } - } - - if len(vs) == 0 { - if registryError { - return nil, ErrRegistryProviderNotKnown{provider} - } else { - return nil, ErrProviderNotFound{provider, s.sourcesForProvider(provider)} - } - } - ret := make(VersionList, 0, len(vs)) - for v := range vs { - ret = append(ret, v) - } - ret.Sort() - - return ret, nil -} - -// PackageMeta retrieves the package metadata for the requested provider package -// from the first selector that indicates availability of it. 
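A sketch of assembling a MultiSource that serves hashicorp-namespace providers from a local mirror and leaves everything else to some other source; registrySource stands in for a previously constructed Source (for example a registry client wrapped in a MemoizeSource), and the mirror path is a placeholder:

    include, err := ParseMultiSourceMatchingPatterns([]string{"hashicorp/*"})
    if err != nil {
    	log.Fatal(err) // assumes the standard library "log" package is imported
    }

    var registrySource Source // stands in for a real registry-backed Source
    src := MultiSource{
    	{Source: NewFilesystemMirrorSource("/usr/local/share/terraform/plugins"), Include: include},
    	{Source: registrySource}, // no patterns: eligible for all providers
    }
    _ = src

Because selectors are consulted in order, the local mirror answers first for matching providers, which is the priority rule the doc comment above describes.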
-func (s MultiSource) PackageMeta(provider addrs.Provider, version Version, target Platform) (PackageMeta, error) { - if len(s) == 0 { // Easy case: no providers exist at all - return PackageMeta{}, ErrProviderNotFound{provider, s.sourcesForProvider(provider)} - } - - for _, selector := range s { - if !selector.CanHandleProvider(provider) { - continue // doesn't match the given patterns - } - meta, err := selector.Source.PackageMeta(provider, version, target) - switch err.(type) { - case nil: - return meta, nil - case ErrProviderNotFound, ErrRegistryProviderNotKnown, ErrPlatformNotSupported: - continue // ignore, then - default: - return PackageMeta{}, err - } - } - - // If we fall out here then none of the sources have the requested - // package. - return PackageMeta{}, ErrPlatformNotSupported{ - Provider: provider, - Version: version, - Platform: target, - } -} - -// MultiSourceSelector is an element of the source selection configuration on -// MultiSource. A MultiSource has zero or more of these to configure which -// underlying sources it should consult for a given provider. -type MultiSourceSelector struct { - // Source is the underlying source that this selector applies to. - Source Source - - // Include and Exclude are sets of provider matching patterns that - // together define which providers are eligible to be potentially - // installed from the corresponding Source. - Include, Exclude MultiSourceMatchingPatterns -} - -// MultiSourceMatchingPatterns is a set of patterns that together define a -// set of providers by matching on the segments of the provider FQNs. -// -// The Provider address values in a MultiSourceMatchingPatterns are special in -// that any of Hostname, Namespace, or Type can be getproviders.Wildcard -// to indicate that any concrete value is permitted for that segment. -type MultiSourceMatchingPatterns []addrs.Provider - -// ParseMultiSourceMatchingPatterns parses a slice of strings containing the -// string form of provider matching patterns and, if all the given strings are -// valid, returns the corresponding, normalized, MultiSourceMatchingPatterns -// value. -func ParseMultiSourceMatchingPatterns(strs []string) (MultiSourceMatchingPatterns, error) { - if len(strs) == 0 { - return nil, nil - } - - ret := make(MultiSourceMatchingPatterns, len(strs)) - for i, str := range strs { - parts := strings.Split(str, "/") - if len(parts) < 2 || len(parts) > 3 { - return nil, fmt.Errorf("invalid provider matching pattern %q: must have either two or three slash-separated segments", str) - } - host := defaultRegistryHost - explicitHost := len(parts) == 3 - if explicitHost { - givenHost := parts[0] - if givenHost == "*" { - host = svchost.Hostname(Wildcard) - } else { - normalHost, err := svchost.ForComparison(givenHost) - if err != nil { - return nil, fmt.Errorf("invalid hostname in provider matching pattern %q: %s", str, err) - } - - // The remaining code below deals only with the namespace/type portions. 
-				host = normalHost
-			}
-
-			parts = parts[1:]
-		}
-
-		pType, err := normalizeProviderNameOrWildcard(parts[1])
-		if err != nil {
-			return nil, fmt.Errorf("invalid provider type %q in provider matching pattern %q: must either be the wildcard * or a provider type name", parts[1], str)
-		}
-		namespace, err := normalizeProviderNameOrWildcard(parts[0])
-		if err != nil {
-			return nil, fmt.Errorf("invalid registry namespace %q in provider matching pattern %q: must either be the wildcard * or a literal namespace", parts[0], str)
-		}
-
-		ret[i] = addrs.Provider{
-			Hostname:  host,
-			Namespace: namespace,
-			Type:      pType,
-		}
-
-		if ret[i].Hostname == svchost.Hostname(Wildcard) && !(ret[i].Namespace == Wildcard && ret[i].Type == Wildcard) {
-			return nil, fmt.Errorf("invalid provider matching pattern %q: hostname can be a wildcard only if both namespace and provider type are also wildcards", str)
-		}
-		if ret[i].Namespace == Wildcard && ret[i].Type != Wildcard {
-			return nil, fmt.Errorf("invalid provider matching pattern %q: namespace can be a wildcard only if the provider type is also a wildcard", str)
-		}
-	}
-	return ret, nil
-}
-
-// CanHandleProvider returns true if and only if the given provider address
-// is both included by the selector's include patterns and _not_ excluded
-// by its exclude patterns.
-//
-// The absence of any include patterns is treated the same as a pattern
-// that matches all addresses. Exclusions take priority over inclusions.
-func (s MultiSourceSelector) CanHandleProvider(addr addrs.Provider) bool {
-	switch {
-	case s.Exclude.MatchesProvider(addr):
-		return false
-	case len(s.Include) > 0:
-		return s.Include.MatchesProvider(addr)
-	default:
-		return true
-	}
-}
-
-// MatchesProvider tests whether the receiving matching patterns match with
-// the given concrete provider address.
-func (ps MultiSourceMatchingPatterns) MatchesProvider(addr addrs.Provider) bool {
-	for _, pattern := range ps {
-		hostMatch := (pattern.Hostname == svchost.Hostname(Wildcard) || pattern.Hostname == addr.Hostname)
-		namespaceMatch := (pattern.Namespace == Wildcard || pattern.Namespace == addr.Namespace)
-		typeMatch := (pattern.Type == Wildcard || pattern.Type == addr.Type)
-		if hostMatch && namespaceMatch && typeMatch {
-			return true
-		}
-	}
-	return false
-}
-
-// Wildcard is a string value representing a wildcard element in the Include
-// and Exclude patterns used with MultiSource. It is not valid to use Wildcard
-// anywhere else.
-const Wildcard string = "*"
-
-// We'll read the default registry host from over in the addrs package, to
-// avoid duplicating it. A "default" provider uses the default registry host
-// by definition.
-var defaultRegistryHost = addrs.DefaultRegistryHost
-
-func normalizeProviderNameOrWildcard(s string) (string, error) {
-	if s == Wildcard {
-		return s, nil
-	}
-	return addrs.ParseProviderPart(s)
-}
-
-func (s MultiSource) ForDisplay(provider addrs.Provider) string {
-	return strings.Join(s.sourcesForProvider(provider), "\n")
-}
-
-// sourcesForProvider returns a list of source display strings configured for a
-// given provider, taking into account any `Exclude` statements.
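Concretely, the pattern grammar above accepts two- and three-segment forms; a sketch of some valid inputs and a match test, with illustrative values:

    patterns, err := ParseMultiSourceMatchingPatterns([]string{
    	"hashicorp/*",               // any type in the hashicorp namespace, default host
    	"registry.terraform.io/*/*", // any provider on an explicit host
    	"*/*/*",                     // any provider on any host
    })
    if err != nil {
    	log.Fatal(err) // assumes the standard library "log" package is imported
    }
    matched := patterns.MatchesProvider(addrs.NewDefaultProvider("aws")) // true
    _ = matched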
-func (s MultiSource) sourcesForProvider(provider addrs.Provider) []string { - ret := make([]string, 0) - for _, selector := range s { - if !selector.CanHandleProvider(provider) { - continue // doesn't match the given patterns - } - ret = append(ret, selector.Source.ForDisplay(provider)) - } - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/package_authentication.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/package_authentication.go deleted file mode 100644 index 987908df..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/getproviders/package_authentication.go +++ /dev/null @@ -1,355 +0,0 @@ -package getproviders - -import ( - "bytes" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "log" - "os" - "strings" - - "golang.org/x/crypto/openpgp" - openpgpArmor "golang.org/x/crypto/openpgp/armor" - openpgpErrors "golang.org/x/crypto/openpgp/errors" -) - -type packageAuthenticationResult int - -const ( - verifiedChecksum packageAuthenticationResult = iota - officialProvider - partnerProvider - communityProvider -) - -// PackageAuthenticationResult is returned from a PackageAuthentication -// implementation. It is a mostly-opaque type intended for use in UI, which -// implements Stringer. -// -// A failed PackageAuthentication attempt will return an "unauthenticated" -// result, which is represented by nil. -type PackageAuthenticationResult struct { - result packageAuthenticationResult - KeyID string -} - -func (t *PackageAuthenticationResult) String() string { - if t == nil { - return "unauthenticated" - } - return []string{ - "verified checksum", - "signed by HashiCorp", - "signed by a HashiCorp partner", - "self-signed", - }[t.result] -} - -// ThirdPartySigned returns whether the package was authenticated as signed by a party -// other than HashiCorp. -func (t *PackageAuthenticationResult) ThirdPartySigned() bool { - if t == nil { - return false - } - if t.result == partnerProvider || t.result == communityProvider { - return true - } - - return false -} - -// SigningKey represents a key used to sign packages from a registry, along -// with an optional trust signature from the registry operator. These are -// both in ASCII armored OpenPGP format. -// -// The JSON struct tags represent the field names used by the Registry API. -type SigningKey struct { - ASCIIArmor string `json:"ascii_armor"` - TrustSignature string `json:"trust_signature"` -} - -// PackageAuthentication is an interface implemented by the optional package -// authentication implementations a source may include on its PackageMeta -// objects. -// -// A PackageAuthentication implementation is responsible for authenticating -// that a package is what its distributor intended to distribute and that it -// has not been tampered with. -type PackageAuthentication interface { - // AuthenticatePackage takes the local location of a package (which may or - // may not be the same as the original source location), and returns a - // PackageAuthenticationResult, or an error if the authentication checks - // fail. - // - // The local location is guaranteed not to be a PackageHTTPURL: a remote - // package will always be staged locally for inspection first. - AuthenticatePackage(localLocation PackageLocation) (*PackageAuthenticationResult, error) -} - -type packageAuthenticationAll []PackageAuthentication - -// PackageAuthenticationAll combines several authentications together into a -// single check value, which passes only if all of the given ones pass. 
-// -// The checks are processed in the order given, so a failure of an earlier -// check will prevent execution of a later one. -// -// The returned result is from the last authentication, so callers should -// take care to order the authentications such that the strongest is last. -func PackageAuthenticationAll(checks ...PackageAuthentication) PackageAuthentication { - return packageAuthenticationAll(checks) -} - -func (checks packageAuthenticationAll) AuthenticatePackage(localLocation PackageLocation) (*PackageAuthenticationResult, error) { - var authResult *PackageAuthenticationResult - for _, check := range checks { - var err error - authResult, err = check.AuthenticatePackage(localLocation) - if err != nil { - return authResult, err - } - } - return authResult, nil -} - -type archiveHashAuthentication struct { - WantSHA256Sum [sha256.Size]byte -} - -// NewArchiveChecksumAuthentication returns a PackageAuthentication -// implementation that checks that the original distribution archive matches -// the given hash. -// -// This authentication is suitable only for PackageHTTPURL and -// PackageLocalArchive source locations, because the unpacked layout -// (represented by PackageLocalDir) does not retain access to the original -// source archive. Therefore this authenticator will return an error if its -// given localLocation is not PackageLocalArchive. -func NewArchiveChecksumAuthentication(wantSHA256Sum [sha256.Size]byte) PackageAuthentication { - return archiveHashAuthentication{wantSHA256Sum} -} - -func (a archiveHashAuthentication) AuthenticatePackage(localLocation PackageLocation) (*PackageAuthenticationResult, error) { - archiveLocation, ok := localLocation.(PackageLocalArchive) - if !ok { - // A source should not use this authentication type for non-archive - // locations. - return nil, fmt.Errorf("cannot check archive hash for non-archive location %s", localLocation) - } - - f, err := os.Open(string(archiveLocation)) - if err != nil { - return nil, err - } - defer f.Close() - - h := sha256.New() - _, err = io.Copy(h, f) - if err != nil { - return nil, err - } - - gotHash := h.Sum(nil) - if !bytes.Equal(gotHash, a.WantSHA256Sum[:]) { - return nil, fmt.Errorf("archive has incorrect SHA-256 checksum %x (expected %x)", gotHash, a.WantSHA256Sum[:]) - } - return &PackageAuthenticationResult{result: verifiedChecksum}, nil -} - -type matchingChecksumAuthentication struct { - Document []byte - Filename string - WantSHA256Sum [sha256.Size]byte -} - -// NewMatchingChecksumAuthentication returns a PackageAuthentication -// implementation that scans a registry-provided SHA256SUMS document for a -// specified filename, and compares the SHA256 hash against the expected hash. -// This is necessary to ensure that the signed SHA256SUMS document matches the -// declared SHA256 hash for the package, and therefore that a valid signature -// of this document authenticates the package. -// -// This authentication always returns a nil result, since it alone cannot offer -// any assertions about package integrity. It should be combined with other -// authentications to be useful. 
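A sketch of the layering a registry client might build from these pieces, following the ordering advice above; the shasumsDoc, shasumsSig, keys, and checksum values are placeholders that would come from a registry response, and NewSignatureAuthentication is the constructor defined further below:

    var (
    	shasumsDoc []byte       // the SHA256SUMS document from the registry
    	shasumsSig []byte       // its detached signature
    	keys       []SigningKey // signing keys from the registry
    	wantSHA256 [sha256.Size]byte
    )

    auth := PackageAuthenticationAll(
    	NewMatchingChecksumAuthentication(shasumsDoc, "terraform-provider-aws_2.0.0_linux_amd64.zip", wantSHA256),
    	NewArchiveChecksumAuthentication(wantSHA256),
    	NewSignatureAuthentication(shasumsDoc, shasumsSig, keys),
    )
    result, err := auth.AuthenticatePackage(PackageLocalArchive("/tmp/terraform-provider-aws_2.0.0_linux_amd64.zip"))
    _, _ = result, err

The signature check is last so that its result (official, partner, or community) is what PackageAuthenticationAll ultimately reports.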
-func NewMatchingChecksumAuthentication(document []byte, filename string, wantSHA256Sum [sha256.Size]byte) PackageAuthentication { - return matchingChecksumAuthentication{ - Document: document, - Filename: filename, - WantSHA256Sum: wantSHA256Sum, - } -} - -func (m matchingChecksumAuthentication) AuthenticatePackage(location PackageLocation) (*PackageAuthenticationResult, error) { - // Find the checksum in the list with matching filename. The document is - // in the form "0123456789abcdef filename.zip". - filename := []byte(m.Filename) - var checksum []byte - for _, line := range bytes.Split(m.Document, []byte("\n")) { - parts := bytes.Fields(line) - if len(parts) > 1 && bytes.Equal(parts[1], filename) { - checksum = parts[0] - break - } - } - if checksum == nil { - return nil, fmt.Errorf("checksum list has no SHA-256 hash for %q", m.Filename) - } - - // Decode the ASCII checksum into a byte array for comparison. - var gotSHA256Sum [sha256.Size]byte - if _, err := hex.Decode(gotSHA256Sum[:], checksum); err != nil { - return nil, fmt.Errorf("checksum list has invalid SHA256 hash %q: %s", string(checksum), err) - } - - // If the checksums don't match, authentication fails. - if !bytes.Equal(gotSHA256Sum[:], m.WantSHA256Sum[:]) { - return nil, fmt.Errorf("checksum list has unexpected SHA-256 hash %x (expected %x)", gotSHA256Sum, m.WantSHA256Sum[:]) - } - - // Success! But this doesn't result in any real authentication, only a - // lack of authentication errors, so we return a nil result. - return nil, nil -} - -type signatureAuthentication struct { - Document []byte - Signature []byte - Keys []SigningKey -} - -// NewSignatureAuthentication returns a PackageAuthentication implementation -// that verifies the cryptographic signature for a package against any of the -// provided keys. -// -// The signing key for a package will be auto detected by attempting each key -// in turn until one is successful. If such a key is found, there are three -// possible successful authentication results: -// -// 1. If the signing key is the HashiCorp official key, it is an official -// provider; -// 2. Otherwise, if the signing key has a trust signature from the HashiCorp -// Partners key, it is a partner provider; -// 3. If neither of the above is true, it is a community provider. -// -// Any failure in the process of validating the signature will result in an -// unauthenticated result. -func NewSignatureAuthentication(document, signature []byte, keys []SigningKey) PackageAuthentication { - return signatureAuthentication{ - Document: document, - Signature: signature, - Keys: keys, - } -} - -func (s signatureAuthentication) AuthenticatePackage(location PackageLocation) (*PackageAuthenticationResult, error) { - // Find the key that signed the checksum file. This can fail if there is no - // valid signature for any of the provided keys. - signingKey, keyID, err := s.findSigningKey() - if err != nil { - return nil, err - } - - // Verify the signature using the HashiCorp public key. If this succeeds, - // this is an official provider. 
-	hashicorpKeyring, err := openpgp.ReadArmoredKeyRing(strings.NewReader(HashicorpPublicKey))
-	if err != nil {
-		return nil, fmt.Errorf("error creating HashiCorp keyring: %s", err)
-	}
-	_, err = openpgp.CheckDetachedSignature(hashicorpKeyring, bytes.NewReader(s.Document), bytes.NewReader(s.Signature))
-	if err == nil {
-		return &PackageAuthenticationResult{result: officialProvider, KeyID: keyID}, nil
-	}
-
-	// If the signing key has a trust signature, attempt to verify it with the
-	// HashiCorp partners public key.
-	if signingKey.TrustSignature != "" {
-		hashicorpPartnersKeyring, err := openpgp.ReadArmoredKeyRing(strings.NewReader(HashicorpPartnersKey))
-		if err != nil {
-			return nil, fmt.Errorf("error creating HashiCorp Partners keyring: %s", err)
-		}
-
-		authorKey, err := openpgpArmor.Decode(strings.NewReader(signingKey.ASCIIArmor))
-		if err != nil {
-			return nil, fmt.Errorf("error decoding signing key: %s", err)
-		}
-
-		trustSignature, err := openpgpArmor.Decode(strings.NewReader(signingKey.TrustSignature))
-		if err != nil {
-			return nil, fmt.Errorf("error decoding trust signature: %s", err)
-		}
-
-		_, err = openpgp.CheckDetachedSignature(hashicorpPartnersKeyring, authorKey.Body, trustSignature.Body)
-		if err != nil {
-			return nil, fmt.Errorf("error verifying trust signature: %s", err)
-		}
-
-		return &PackageAuthenticationResult{result: partnerProvider, KeyID: keyID}, nil
-	}
-
-	// We have a valid signature, but it's not from the HashiCorp key, and it
-	// also isn't a trusted partner. This is a community provider.
-	return &PackageAuthenticationResult{result: communityProvider, KeyID: keyID}, nil
-}
-
-// findSigningKey attempts to verify the signature using each of the keys
-// returned by the registry. If a valid signature is found, it returns the
-// signing key.
-//
-// Note: currently the registry only returns one key, but this may change in
-// the future.
-func (s signatureAuthentication) findSigningKey() (*SigningKey, string, error) {
-	for _, key := range s.Keys {
-		keyring, err := openpgp.ReadArmoredKeyRing(strings.NewReader(key.ASCIIArmor))
-		if err != nil {
-			return nil, "", fmt.Errorf("error decoding signing key: %s", err)
-		}
-
-		entity, err := openpgp.CheckDetachedSignature(keyring, bytes.NewReader(s.Document), bytes.NewReader(s.Signature))
-
-		// If the signature issuer does not match the key, keep trying the
-		// rest of the provided keys.
-		if err == openpgpErrors.ErrUnknownIssuer {
-			continue
-		}
-
-		// Any other signature error is terminal.
-		if err != nil {
-			return nil, "", fmt.Errorf("error checking signature: %s", err)
-		}
-
-		keyID := "n/a"
-		if entity.PrimaryKey != nil {
-			keyID = entity.PrimaryKey.KeyIdString()
-		}
-
-		log.Printf("[DEBUG] Provider signed by %s", entityString(entity))
-		return &key, keyID, nil
-	}
-
-	// If none of the provided keys issued the signature, this package is
-	// unsigned. This is currently a terminal authentication error.
-	return nil, "", fmt.Errorf("authentication signature from unknown issuer")
-}
-
-// entityString extracts the key ID and identity name(s) from an openpgp.Entity
-// for logging.
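The primitive behind both the official-key check and the per-key loop in findSigningKey is a detached-signature verification from golang.org/x/crypto/openpgp. A minimal sketch outside Terraform, with placeholder file names:

package main

import (
	"fmt"
	"os"
	"strings"

	"golang.org/x/crypto/openpgp"
)

func main() {
	// Placeholder inputs: an armored public key, the signed checksums
	// document, and its detached binary signature.
	armor, _ := os.ReadFile("signer.asc")
	document, _ := os.ReadFile("terraform-provider-example_SHA256SUMS")
	signature, _ := os.ReadFile("terraform-provider-example_SHA256SUMS.sig")

	keyring, err := openpgp.ReadArmoredKeyRing(strings.NewReader(string(armor)))
	if err != nil {
		fmt.Println("bad key:", err)
		return
	}
	entity, err := openpgp.CheckDetachedSignature(keyring,
		strings.NewReader(string(document)), strings.NewReader(string(signature)))
	if err != nil {
		// A mismatched key surfaces as openpgpErrors.ErrUnknownIssuer,
		// which is the case the loop above skips past.
		fmt.Println("verification failed:", err)
		return
	}
	if entity.PrimaryKey != nil {
		fmt.Println("signed by", entity.PrimaryKey.KeyIdString())
	}
}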
-func entityString(entity *openpgp.Entity) string { - if entity == nil { - return "" - } - - keyID := "n/a" - if entity.PrimaryKey != nil { - keyID = entity.PrimaryKey.KeyIdString() - } - - var names []string - for _, identity := range entity.Identities { - names = append(names, identity.Name) - } - - return fmt.Sprintf("%s %s", keyID, strings.Join(names, ", ")) -} diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/public_keys.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/public_keys.go deleted file mode 100644 index bbbcdc80..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/getproviders/public_keys.go +++ /dev/null @@ -1,89 +0,0 @@ -package getproviders - -// HashicorpPublicKey is the HashiCorp public key, also available at -// https://www.hashicorp.com/security -const HashicorpPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f -W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq -fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA -3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca -KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k -SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1 -cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG -CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n -Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i -SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi -psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w -sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO -klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW -WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9 -wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j -2tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM -skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo -mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y -0H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA -CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc -z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP -0BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG -unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ -EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ -oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C -=LYpS ------END PGP PUBLIC KEY BLOCK-----` - -// HashicorpPartnersKey is a key created by HashiCorp, used to generate and -// verify trust signatures for Partner tier providers. 
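As a sketch of what a constant like this feeds into: load the armored key into a keyring and print each entity the way entityString formats it. The key body is elided here, so this particular program reports a decode error at runtime; with the full armored block above it prints the key ID and identities.

package main

import (
	"fmt"
	"strings"

	"golang.org/x/crypto/openpgp"
)

const HashicorpPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
...
-----END PGP PUBLIC KEY BLOCK-----` // full armored key elided for brevity

func main() {
	keyring, err := openpgp.ReadArmoredKeyRing(strings.NewReader(HashicorpPublicKey))
	if err != nil {
		fmt.Println("cannot read keyring:", err)
		return
	}
	for _, entity := range keyring {
		keyID := "n/a"
		if entity.PrimaryKey != nil {
			keyID = entity.PrimaryKey.KeyIdString()
		}
		var names []string
		for _, id := range entity.Identities {
			names = append(names, id.Name)
		}
		fmt.Printf("%s %s\n", keyID, strings.Join(names, ", "))
	}
}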
-const HashicorpPartnersKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- - -mQINBF5vdGkBEADKi3Nm83oqMcar+YSDFKBup7+/Ty7m+SldtDH4/RWT0vgVHuQ1 -0joA+TrjITR5/aBVQ1/i2pOiBiImnaWsykccjFw9f9AuJqHo520YrAbNCeA6LuGH -Gvz4u0ReL/Cjbb9xCb34tejmrVOX+tmyiYBQd+oTae3DiyffOI9HxF6v+IKhOFKz -Grs3/R5MDwU1ZQIXTO2bdBOM67XBwvTUC+dy6Nem5UmmwuCI0Qz/JWTGndG8aGDC -EO9+DJ59/IwzBYlbs11iqdfqiGALNr+4FXTwftsxZOGpyxhjyAK00U2PP+gQ/wOK -aeIOL7qpF94GdyVrZzDeMKVLUDmhXxDhyatG4UueRJVAoqNVvAFfEwavpYUrVpYl -se/ZugCcTc9VeDodA4r4VI8yQQW805C+uZ/Q+Ym4r+xTsKcTyC4er4ogXgrMT73B -9sgA2M1B4oGbMN5IuG/L2C9JZ1Tob0h0fX+UGMOvrpWeJkZEKTU8hm4mZwhxeRdL -rrcqs6sewNPRnSiUlxz9ynJuf8vFNAD79Z6H9lULe6FnPuLImzH78FKH9QMQsoAW -z1GlYDrxNs3rHDTkSmvglwmWKpsfCxUnfq4ecsYtroCDjAwhLsf2qO1WlXD8B53h -6LU5DwPo7jJDpOv4B0YbjGuAJCf0oXmhXqdu9te6ybXb84ArtHlVO4EBRQARAQAB -tFFIYXNoaUNvcnAgU2VjdXJpdHkgKFRlcnJhZm9ybSBQYXJ0bmVyIFNpZ25pbmcp -IDxzZWN1cml0eSt0ZXJyYWZvcm1AaGFzaGljb3JwLmNvbT6JAk4EEwEIADgWIQRR -iQZXxazbS4IwhlZ9ctQmjkZg/AUCXm90aQIbAwULCQgHAgYVCgkICwIEFgIDAQIe -AQIXgAAKCRB9ctQmjkZg/LxFEACACTHlqULv38VCteo8UR4sRFcaSK4kwzXyRLI2 -oi3tnGdzc9AJ5Brp6/GwcERz0za3NU6LJ5kI7umHhuSb+FOjzQKLbttfKL+bTiNH -HY9NyJPhr6wKJs4Mh8HJ7/FdU7Tsg0cpayNvO5ilU3Mf7H1zaWOVut8BFRYqXGKi -K5/GGmw9C6QwaVSxR4i2kcZYUk4mnTikug53/4sQGnD3zScpDjipEqGTBMLk4r+E -0792MZFRAYRIMmZ0NfaMoIGE7bnmtMrbqtNiw+VaPILk6EyDVK3XJxNDBY/4kwHW -4pDa/qjD7nCL7LapP6NN8sDE++l2MSveorzjtR2yV+goqK1yV0VL2X8zwk1jANX7 -HatY6eKJwkx72BpL5N3ps915Od7kc/k7HdDgyoFQCOkuz9nHr7ix1ioltDcaEXwQ -qTv33M21uG7muNlFsEav2yInPGmIRRqBaGg/5AjF8v1mnGOjzJKNMCIEXIpkYoPS -fY9wud2s9DvHHvVuF+pT8YtmJDqKdGVAgv+VAH8z6zeIRaQXRRrbzFaCIozmz3qF -RLPixaPhcw5EHB7MhWBVDnsPXJG811KjMxCrW57ldeBsbR+cEKydEpYFnSjwksGy -FrCFPA4Vol/ks/ldotS7P9FDmYs7VfB0fco4fdyvwnxksRCfY1kg0dJA3Q0uj/uD -MoBzF7kCDQReb3RpARAAr1uZ2iRuoFRTBiI2Ao9Mn2Nk0B+WEWT+4S6oDSuryf+6 -sKI9Z+wgSvp7DOKyNARoqv+hnjA5Z+t7y/2K7fZP4TYpqOKw8NRKIUoNH0U2/YED -LN0FlXKuVdXtqfijoRZF/W/UyEMVRpub0yKwQDgsijoUDXIG1INVO/NSMGh5UJxE -I+KoU+oIahNPSTgHPizqhJ5OEYkMMfvIr5eHErtB9uylqifVDlvojeHyzU46XmGw -QLxYzufzLYoeBx9uZjZWIlxpxD2mVPmAYVJtDE0uKRZ29+fnlcxWzhx7Ow+wSVRp -XLwDLxZh1YJseY/cGj6yzjA8NolG1fx94PRD1iF7VukHJ3LkukK3+Iw2o4JKmrFx -FpVVcEoldb4bNRMnbY0KDOXn0/9LM+lhEnCRAo8y5zDO6kmjA56emy4iPHRBlngJ -Egms8wnuKsgNkYG8uRaa6zC9FOY/4MbXtNPg8j3pPlWr5jQVdy053uB9UqGs7y3a -C1z9bII58Otp8p4Hf5W97MNuXTxPgPDNmWXA6xu7k2+aut8dgvgz1msHTs31bTeG -X4iRt23/XWlIy56Jar6NkV74rdiKevAbJRHp/sj9AIR4h0pm4yCjZSEKmMqELj7L -nVSj0s9VSL0algqK5yXLoj6gYUWFfcuHcypnRGvjrpDzGgD9AKrDsmQ3pxFflZ8A -EQEAAYkCNgQYAQgAIBYhBFGJBlfFrNtLgjCGVn1y1CaORmD8BQJeb3RpAhsMAAoJ -EH1y1CaORmD89rUP/0gszqvnU3oXo1lMiwz44EfHDGWeY6sh1pJS0FfyjefIMEzE -rAJvyWXbzRj+Dd2g7m7p5JUf/UEMO6EFdxe1l6IihHJBs+pC6hliFwlGosfJwVc2 -wtPg6okAfFI35RBedvrV3uzq01dqFlb+d85Gl24du6nOv6eBXiZ8Pr9F3zPDHLPw -DTP/RtNDxnw8KOC0Z0TE9iQIY1rJCI2mekJ4btHRQ2q9eZQjGFp5HcHBXs/D2ZXC -H/vwB0UskHrtduEUSeTgKkKuPuxbCU5rhE8RGprS41KLYozveD0r5BPa9kBx7qYZ -iOHgWfwlJ4yRjgjtoZl4E9/7aGioYycHNG26UZ+ZHgwTwtDrTU+LP89WrhzoOQmq -H0oU4P/oMe2YKnG6FgCWt8h+31Q08G5VJeXNUoOn+RG02M7HOMHYGeP5wkzAy2HY -I4iehn+A3Cwudv8Gh6WaRqPjLGbk9GWr5fAUG3KLUgJ8iEqnt0/waP7KD78TVId8 -DgHymHMvAU+tAxi5wUcC3iQYrBEc1X0vcsRcW6aAi2Cxc/KEkVCz+PJ+HmFVZakS -V+fniKpSnhUlDkwlG5dMGhkGp/THU3u8oDb3rSydRPcRXVe1D0AReUFE2rDOeRoT -VYF2OtVmpc4ntcRyrItyhSkR/m7BQeBFIT8GQvbTmrCDQgrZCsFsIwxd4Cb4 -=5/s+ ------END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/registry_client.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/registry_client.go deleted file mode 100644 index 14bb016e..00000000 --- 
a/vendor/github.com/hashicorp/terraform/internal/getproviders/registry_client.go +++ /dev/null @@ -1,565 +0,0 @@ -package getproviders - -import ( - "crypto/sha256" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" - "os" - "path" - "strconv" - "time" - - "github.com/hashicorp/go-retryablehttp" - svchost "github.com/hashicorp/terraform-svchost" - svcauth "github.com/hashicorp/terraform-svchost/auth" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/helper/logging" - "github.com/hashicorp/terraform/httpclient" - "github.com/hashicorp/terraform/version" -) - -const ( - terraformVersionHeader = "X-Terraform-Version" - - // registryDiscoveryRetryEnvName is the name of the environment variable that - // can be configured to customize number of retries for module and provider - // discovery requests with the remote registry. - registryDiscoveryRetryEnvName = "TF_REGISTRY_DISCOVERY_RETRY" - defaultRetry = 1 - - // registryClientTimeoutEnvName is the name of the environment variable that - // can be configured to customize the timeout duration (seconds) for module - // and provider discovery with the remote registry. - registryClientTimeoutEnvName = "TF_REGISTRY_CLIENT_TIMEOUT" - - // defaultRequestTimeout is the default timeout duration for requests to the - // remote registry. - defaultRequestTimeout = 10 * time.Second -) - -var ( - discoveryRetry int - requestTimeout time.Duration -) - -func init() { - configureDiscoveryRetry() - configureRequestTimeout() -} - -var SupportedPluginProtocols = MustParseVersionConstraints("~> 5") - -// registryClient is a client for the provider registry protocol that is -// specialized only for the needs of this package. It's not intended as a -// general registry API client. -type registryClient struct { - baseURL *url.URL - creds svcauth.HostCredentials - - httpClient *retryablehttp.Client -} - -func newRegistryClient(baseURL *url.URL, creds svcauth.HostCredentials) *registryClient { - httpClient := httpclient.New() - httpClient.Timeout = requestTimeout - - retryableClient := retryablehttp.NewClient() - retryableClient.HTTPClient = httpClient - retryableClient.RetryMax = discoveryRetry - retryableClient.RequestLogHook = requestLogHook - retryableClient.ErrorHandler = maxRetryErrorHandler - - logOutput, err := logging.LogOutput() - if err != nil { - log.Printf("[WARN] Failed to set up registry client logger, "+ - "continuing without client logging: %s", err) - } - retryableClient.Logger = log.New(logOutput, "", log.Flags()) - - return ®istryClient{ - baseURL: baseURL, - creds: creds, - httpClient: retryableClient, - } -} - -// ProviderVersions returns the raw version and protocol strings produced by the -// registry for the given provider. -// -// The returned error will be ErrRegistryProviderNotKnown if the registry responds with -// 404 Not Found to indicate that the namespace or provider type are not known, -// ErrUnauthorized if the registry responds with 401 or 403 status codes, or -// ErrQueryFailed for any other protocol or operational problem. -func (c *registryClient) ProviderVersions(addr addrs.Provider) (map[string][]string, error) { - endpointPath, err := url.Parse(path.Join(addr.Namespace, addr.Type, "versions")) - if err != nil { - // Should never happen because we're constructing this from - // already-validated components. 
-		return nil, err
-	}
-	endpointURL := c.baseURL.ResolveReference(endpointPath)
-
-	req, err := retryablehttp.NewRequest("GET", endpointURL.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-	c.addHeadersToRequest(req.Request)
-
-	resp, err := c.httpClient.Do(req)
-	if err != nil {
-		return nil, c.errQueryFailed(addr, err)
-	}
-	defer resp.Body.Close()
-
-	switch resp.StatusCode {
-	case http.StatusOK:
-		// Great!
-	case http.StatusNotFound:
-		return nil, ErrRegistryProviderNotKnown{
-			Provider: addr,
-		}
-	case http.StatusUnauthorized, http.StatusForbidden:
-		return nil, c.errUnauthorized(addr.Hostname)
-	default:
-		return nil, c.errQueryFailed(addr, errors.New(resp.Status))
-	}
-
-	// We ignore the platforms portion of the response body, because the
-	// installer verifies the platform compatibility after pulling a provider
-	// version's metadata.
-	type ResponseBody struct {
-		Versions []struct {
-			Version   string   `json:"version"`
-			Protocols []string `json:"protocols"`
-		} `json:"versions"`
-	}
-	var body ResponseBody
-
-	dec := json.NewDecoder(resp.Body)
-	if err := dec.Decode(&body); err != nil {
-		return nil, c.errQueryFailed(addr, err)
-	}
-
-	if len(body.Versions) == 0 {
-		return nil, nil
-	}
-
-	ret := make(map[string][]string, len(body.Versions))
-	for _, v := range body.Versions {
-		ret[v.Version] = v.Protocols
-	}
-	return ret, nil
-}
-
-// PackageMeta returns metadata about a distribution package for a provider.
-//
-// The returned error will be one of the following:
-//
-// - ErrPlatformNotSupported if the registry responds with 404 Not Found,
-// under the assumption that the caller previously checked that the provider
-// and version are valid.
-// - ErrProtocolNotSupported if the requested provider version's protocols are not
-// supported by this version of terraform.
-// - ErrUnauthorized if the registry responds with 401 or 403 status codes
-// - ErrQueryFailed for any other operational problem.
-func (c *registryClient) PackageMeta(provider addrs.Provider, version Version, target Platform) (PackageMeta, error) {
-	endpointPath, err := url.Parse(path.Join(
-		provider.Namespace,
-		provider.Type,
-		version.String(),
-		"download",
-		target.OS,
-		target.Arch,
-	))
-	if err != nil {
-		// Should never happen because we're constructing this from
-		// already-validated components.
-		return PackageMeta{}, err
-	}
-	endpointURL := c.baseURL.ResolveReference(endpointPath)
-
-	req, err := retryablehttp.NewRequest("GET", endpointURL.String(), nil)
-	if err != nil {
-		return PackageMeta{}, err
-	}
-	c.addHeadersToRequest(req.Request)
-
-	resp, err := c.httpClient.Do(req)
-	if err != nil {
-		return PackageMeta{}, c.errQueryFailed(provider, err)
-	}
-	defer resp.Body.Close()
-
-	switch resp.StatusCode {
-	case http.StatusOK:
-		// Great!
-	case http.StatusNotFound:
-		return PackageMeta{}, ErrPlatformNotSupported{
-			Provider: provider,
-			Version:  version,
-			Platform: target,
-		}
-	case http.StatusUnauthorized, http.StatusForbidden:
-		return PackageMeta{}, c.errUnauthorized(provider.Hostname)
-	default:
-		return PackageMeta{}, c.errQueryFailed(provider, errors.New(resp.Status))
-	}
-
-	type SigningKeyList struct {
-		GPGPublicKeys []*SigningKey `json:"gpg_public_keys"`
-	}
-	type ResponseBody struct {
-		Protocols   []string `json:"protocols"`
-		OS          string   `json:"os"`
-		Arch        string   `json:"arch"`
-		Filename    string   `json:"filename"`
-		DownloadURL string   `json:"download_url"`
-		SHA256Sum   string   `json:"shasum"`
-
-		SHA256SumsURL          string `json:"shasums_url"`
-		SHA256SumsSignatureURL string `json:"shasums_signature_url"`
-
-		SigningKeys SigningKeyList `json:"signing_keys"`
-	}
-	var body ResponseBody
-
-	dec := json.NewDecoder(resp.Body)
-	if err := dec.Decode(&body); err != nil {
-		return PackageMeta{}, c.errQueryFailed(provider, err)
-	}
-
-	var protoVersions VersionList
-	for _, versionStr := range body.Protocols {
-		v, err := ParseVersion(versionStr)
-		if err != nil {
-			return PackageMeta{}, c.errQueryFailed(
-				provider,
-				fmt.Errorf("registry response includes invalid version string %q: %s", versionStr, err),
-			)
-		}
-		protoVersions = append(protoVersions, v)
-	}
-	protoVersions.Sort()
-
-	// Verify that this version of terraform supports the provider's protocol
-	// version(s)
-	if len(protoVersions) > 0 {
-		supportedProtos := MeetingConstraints(SupportedPluginProtocols)
-		protoErr := ErrProtocolNotSupported{
-			Provider: provider,
-			Version:  version,
-		}
-		match := false
-		for _, version := range protoVersions {
-			if supportedProtos.Has(version) {
-				match = true
-			}
-		}
-		if !match {
-			// If the protocol version is not supported, try to find the closest
-			// matching version.
-			closest, err := c.findClosestProtocolCompatibleVersion(provider, version)
-			if err != nil {
-				return PackageMeta{}, err
-			}
-			protoErr.Suggestion = closest
-			return PackageMeta{}, protoErr
-		}
-	}
-
-	downloadURL, err := url.Parse(body.DownloadURL)
-	if err != nil {
-		return PackageMeta{}, fmt.Errorf("registry response includes invalid download URL: %s", err)
-	}
-	downloadURL = resp.Request.URL.ResolveReference(downloadURL)
-	if downloadURL.Scheme != "http" && downloadURL.Scheme != "https" {
-		return PackageMeta{}, fmt.Errorf("registry response includes invalid download URL: must use http or https scheme")
-	}
-
-	ret := PackageMeta{
-		Provider:         provider,
-		Version:          version,
-		ProtocolVersions: protoVersions,
-		TargetPlatform: Platform{
-			OS:   body.OS,
-			Arch: body.Arch,
-		},
-		Filename: body.Filename,
-		Location: PackageHTTPURL(downloadURL.String()),
-		// "Authentication" is populated below
-	}
-
-	if len(body.SHA256Sum) != sha256.Size*2 { // *2 because it's hex-encoded
-		return PackageMeta{}, c.errQueryFailed(
-			provider,
-			fmt.Errorf("registry response includes invalid SHA256 hash %q", body.SHA256Sum),
-		)
-	}
-
-	var checksum [sha256.Size]byte
-	_, err = hex.Decode(checksum[:], []byte(body.SHA256Sum))
-	if err != nil {
-		return PackageMeta{}, c.errQueryFailed(
-			provider,
-			fmt.Errorf("registry response includes invalid SHA256 hash %q: %s", body.SHA256Sum, err),
-		)
-	}
-
-	shasumsURL, err := url.Parse(body.SHA256SumsURL)
-	if err != nil {
-		return PackageMeta{}, fmt.Errorf("registry response includes invalid SHASUMS URL: %s", err)
-	}
-	shasumsURL = resp.Request.URL.ResolveReference(shasumsURL)
-	if shasumsURL.Scheme != "http" && shasumsURL.Scheme != "https" {
-		return PackageMeta{}, fmt.Errorf("registry response includes invalid SHASUMS URL: must use http or https scheme")
-	}
-	document, err := c.getFile(shasumsURL)
-	if err != nil {
-		return PackageMeta{}, c.errQueryFailed(
-			provider,
-			fmt.Errorf("failed to retrieve authentication checksums for provider: %s", err),
-		)
-	}
-	signatureURL, err := url.Parse(body.SHA256SumsSignatureURL)
-	if err != nil {
-		return PackageMeta{}, fmt.Errorf("registry response includes invalid SHASUMS signature URL: %s", err)
-	}
-	signatureURL = resp.Request.URL.ResolveReference(signatureURL)
-	if signatureURL.Scheme != "http" && signatureURL.Scheme != "https" {
-		return PackageMeta{}, fmt.Errorf("registry response includes invalid SHASUMS signature URL: must use http or https scheme")
-	}
-	signature, err := c.getFile(signatureURL)
-	if err != nil {
-		return PackageMeta{}, c.errQueryFailed(
-			provider,
-			fmt.Errorf("failed to retrieve cryptographic signature for provider: %s", err),
-		)
-	}
-
-	keys := make([]SigningKey, len(body.SigningKeys.GPGPublicKeys))
-	for i, key := range body.SigningKeys.GPGPublicKeys {
-		keys[i] = *key
-	}
-
-	ret.Authentication = PackageAuthenticationAll(
-		NewMatchingChecksumAuthentication(document, body.Filename, checksum),
-		NewArchiveChecksumAuthentication(checksum),
-		NewSignatureAuthentication(document, signature, keys),
-	)
-
-	return ret, nil
-}
-
-// findClosestProtocolCompatibleVersion searches for the provider version with the closest protocol match.
-func (c *registryClient) findClosestProtocolCompatibleVersion(provider addrs.Provider, version Version) (Version, error) {
-	var match Version
-	available, err := c.ProviderVersions(provider)
-	if err != nil {
-		return UnspecifiedVersion, err
-	}
-
-	// extract the map's keys so we can make a sorted list of available versions.
- versionList := make(VersionList, 0, len(available)) - for versionStr := range available { - v, err := ParseVersion(versionStr) - if err != nil { - return UnspecifiedVersion, ErrQueryFailed{ - Provider: provider, - Wrapped: fmt.Errorf("registry response includes invalid version string %q: %s", versionStr, err), - } - } - versionList = append(versionList, v) - } - versionList.Sort() // lowest precedence first, preserving order when equal precedence - - protoVersions := MeetingConstraints(SupportedPluginProtocols) -FindMatch: - // put the versions in increasing order of precedence - for index := len(versionList) - 1; index >= 0; index-- { // walk backwards to consider newer versions first - for _, protoStr := range available[versionList[index].String()] { - p, err := ParseVersion(protoStr) - if err != nil { - return UnspecifiedVersion, ErrQueryFailed{ - Provider: provider, - Wrapped: fmt.Errorf("registry response includes invalid protocol string %q: %s", protoStr, err), - } - } - if protoVersions.Has(p) { - match = versionList[index] - break FindMatch - } - } - } - return match, nil -} - -// LegacyProviderDefaultNamespace returns the raw address strings produced by -// the registry when asked about the given unqualified provider type name. -// The returned namespace string is taken verbatim from the registry's response. -// -// This method exists only to allow compatibility with unqualified names -// in older configurations. New configurations should be written so as not to -// depend on it. -func (c *registryClient) LegacyProviderDefaultNamespace(typeName string) (string, error) { - endpointPath, err := url.Parse(path.Join("-", typeName, "versions")) - if err != nil { - // Should never happen because we're constructing this from - // already-validated components. - return "", err - } - endpointURL := c.baseURL.ResolveReference(endpointPath) - - req, err := retryablehttp.NewRequest("GET", endpointURL.String(), nil) - if err != nil { - return "", err - } - c.addHeadersToRequest(req.Request) - - // This is just to give us something to return in error messages. It's - // not a proper provider address. - placeholderProviderAddr := addrs.NewLegacyProvider(typeName) - - resp, err := c.httpClient.Do(req) - if err != nil { - return "", c.errQueryFailed(placeholderProviderAddr, err) - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusOK: - // Great! 
- case http.StatusNotFound: - return "", ErrProviderNotFound{ - Provider: placeholderProviderAddr, - } - case http.StatusUnauthorized, http.StatusForbidden: - return "", c.errUnauthorized(placeholderProviderAddr.Hostname) - default: - return "", c.errQueryFailed(placeholderProviderAddr, errors.New(resp.Status)) - } - - type ResponseBody struct { - Id string - } - var body ResponseBody - - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&body); err != nil { - return "", c.errQueryFailed(placeholderProviderAddr, err) - } - - provider, diags := addrs.ParseProviderSourceString(body.Id) - if diags.HasErrors() { - return "", fmt.Errorf("Error parsing provider ID from Registry: %s", diags.Err()) - } - - if provider.Type != typeName { - return "", fmt.Errorf("Registry returned provider with type %q, expected %q", provider.Type, typeName) - } - - return provider.Namespace, nil -} - -func (c *registryClient) addHeadersToRequest(req *http.Request) { - if c.creds != nil { - c.creds.PrepareRequest(req) - } - req.Header.Set(terraformVersionHeader, version.String()) -} - -func (c *registryClient) errQueryFailed(provider addrs.Provider, err error) error { - return ErrQueryFailed{ - Provider: provider, - Wrapped: err, - } -} - -func (c *registryClient) errUnauthorized(hostname svchost.Hostname) error { - return ErrUnauthorized{ - Hostname: hostname, - HaveCredentials: c.creds != nil, - } -} - -func (c *registryClient) getFile(url *url.URL) ([]byte, error) { - resp, err := c.httpClient.Get(url.String()) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s", resp.Status) - } - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return data, err - } - - return data, nil -} - -// configureDiscoveryRetry configures the number of retries the registry client -// will attempt for requests with retryable errors, like 502 status codes -func configureDiscoveryRetry() { - discoveryRetry = defaultRetry - - if v := os.Getenv(registryDiscoveryRetryEnvName); v != "" { - retry, err := strconv.Atoi(v) - if err == nil && retry > 0 { - discoveryRetry = retry - } - } -} - -func requestLogHook(logger retryablehttp.Logger, req *http.Request, i int) { - if i > 0 { - logger.Printf("[INFO] Previous request to the remote registry failed, attempting retry.") - } -} - -func maxRetryErrorHandler(resp *http.Response, err error, numTries int) (*http.Response, error) { - // Close the body per library instructions - if resp != nil { - resp.Body.Close() - } - - // Additional error detail: if we have a response, use the status code; - // if we have an error, use that; otherwise nothing. We will never have - // both response and error. - var errMsg string - if resp != nil { - errMsg = fmt.Sprintf(": %d", resp.StatusCode) - } else if err != nil { - errMsg = fmt.Sprintf(": %s", err) - } - - // This function is always called with numTries=RetryMax+1. If we made any - // retry attempts, include that in the error message. 
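The retry knob above (and the timeout knob just below) are plain environment variables wired into a go-retryablehttp client. A standalone sketch of the same wiring, with a hypothetical envInt helper and an example URL:

package main

import (
	"log"
	"net/http"
	"os"
	"strconv"
	"time"

	"github.com/hashicorp/go-retryablehttp"
)

// envInt returns name's value as a positive int, or fallback, mirroring
// configureDiscoveryRetry/configureRequestTimeout above. Hypothetical helper.
func envInt(name string, fallback int) int {
	if v := os.Getenv(name); v != "" {
		if n, err := strconv.Atoi(v); err == nil && n > 0 {
			return n
		}
	}
	return fallback
}

func main() {
	client := retryablehttp.NewClient()
	client.RetryMax = envInt("TF_REGISTRY_DISCOVERY_RETRY", 1)
	client.HTTPClient = &http.Client{
		Timeout: time.Duration(envInt("TF_REGISTRY_CLIENT_TIMEOUT", 10)) * time.Second,
	}

	// Example request only; any registry endpoint would do.
	resp, err := client.Get("https://registry.terraform.io/.well-known/terraform.json")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}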
-	if numTries > 1 {
-		return resp, fmt.Errorf("the request failed after %d attempts, please try again later%s",
-			numTries, errMsg)
-	}
-	return resp, fmt.Errorf("the request failed, please try again later%s", errMsg)
-}
-
-// configureRequestTimeout configures the registry client request timeout from
-// environment variables.
-func configureRequestTimeout() {
-	requestTimeout = defaultRequestTimeout
-
-	if v := os.Getenv(registryClientTimeoutEnvName); v != "" {
-		timeout, err := strconv.Atoi(v)
-		if err == nil && timeout > 0 {
-			requestTimeout = time.Duration(timeout) * time.Second
-		}
-	}
-}
diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/registry_source.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/registry_source.go
deleted file mode 100644
index 042a028f..00000000
--- a/vendor/github.com/hashicorp/terraform/internal/getproviders/registry_source.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package getproviders
-
-import (
-	"fmt"
-
-	svchost "github.com/hashicorp/terraform-svchost"
-	disco "github.com/hashicorp/terraform-svchost/disco"
-
-	"github.com/hashicorp/terraform/addrs"
-)
-
-// RegistrySource is a Source that knows how to find and install providers from
-// their originating provider registries.
-type RegistrySource struct {
-	services *disco.Disco
-}
-
-var _ Source = (*RegistrySource)(nil)
-
-// NewRegistrySource creates and returns a new source that will install
-// providers from their originating provider registries.
-func NewRegistrySource(services *disco.Disco) *RegistrySource {
-	return &RegistrySource{
-		services: services,
-	}
-}
-
-// AvailableVersions returns all of the versions available for the provider
-// with the given address, or an error if that result cannot be determined.
-//
-// If the request fails, the returned error might be a value of
-// ErrHostNoProviders, ErrHostUnreachable, ErrUnauthenticated,
-// ErrProviderNotKnown, or ErrQueryFailed. Callers must be defensive and
-// expect errors of other types too, to allow for future expansion.
-func (s *RegistrySource) AvailableVersions(provider addrs.Provider) (VersionList, error) {
-	client, err := s.registryClient(provider.Hostname)
-	if err != nil {
-		return nil, err
-	}
-
-	versionProtosMap, err := client.ProviderVersions(provider)
-	if err != nil {
-		return nil, err
-	}
-
-	if len(versionProtosMap) == 0 {
-		return nil, nil
-	}
-
-	// We ignore everything except the version numbers here because our goal
-	// is to find out which versions are available _at all_. Which ones are
-	// compatible with the current Terraform becomes relevant only once we've
-	// selected one, at which point we'll return an error if the selected one
-	// is incompatible.
-	//
-	// We intentionally produce an error on incompatibility, rather than
-	// silently ignoring an incompatible version, in order to give the user
-	// explicit feedback about why their selection wasn't valid and allow them
-	// to decide whether to fix that by changing the selection or by some other
-	// action such as upgrading Terraform, using a different OS to run
-	// Terraform, etc. Changes that affect compatibility are considered
-	// breaking changes from a provider API standpoint, so provider teams
-	// should change compatibility only in new major versions.
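The code that follows this comment is essentially parse-and-sort over the registry's version strings. The same operation, sketched standalone against the go-versions library that backs VersionList:

package main

import (
	"fmt"

	"github.com/apparentlymart/go-versions/versions"
)

func main() {
	// Example version strings of the shape a registry returns.
	raw := []string{"1.2.0", "0.9.5", "1.2.0-beta1", "1.10.0"}

	var list versions.List
	for _, s := range raw {
		v, err := versions.ParseVersion(s)
		if err != nil {
			fmt.Printf("invalid version string %q: %s\n", s, err)
			continue
		}
		list = append(list, v)
	}
	list.Sort() // lowest precedence first, as in the code below
	fmt.Println(list) // [0.9.5 1.2.0-beta1 1.2.0 1.10.0]
}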
-	ret := make(VersionList, 0, len(versionProtosMap))
-	for str := range versionProtosMap {
-		v, err := ParseVersion(str)
-		if err != nil {
-			return nil, ErrQueryFailed{
-				Provider: provider,
-				Wrapped:  fmt.Errorf("registry response includes invalid version string %q: %s", str, err),
-			}
-		}
-		ret = append(ret, v)
-	}
-	ret.Sort() // lowest precedence first, preserving order when equal precedence
-	return ret, nil
-}
-
-// PackageMeta returns metadata about the location and capabilities of
-// a distribution package for a particular provider at a particular version
-// targeting a particular platform.
-//
-// Callers of PackageMeta should first call AvailableVersions and pass
-// one of the resulting versions to this function. This function cannot
-// distinguish between a version that is not available and an unsupported
-// target platform, so if it encounters either case it will return an error
-// suggesting that the target platform isn't supported under the assumption
-// that the caller already checked that the version is available at all.
-//
-// To find a package suitable for the platform where the provider installation
-// process is running, set the "target" argument to
-// getproviders.CurrentPlatform.
-//
-// If the request fails, the returned error might be a value of
-// ErrHostNoProviders, ErrHostUnreachable, ErrUnauthenticated,
-// ErrPlatformNotSupported, or ErrQueryFailed. Callers must be defensive and
-// expect errors of other types too, to allow for future expansion.
-func (s *RegistrySource) PackageMeta(provider addrs.Provider, version Version, target Platform) (PackageMeta, error) {
-	client, err := s.registryClient(provider.Hostname)
-	if err != nil {
-		return PackageMeta{}, err
-	}
-
-	return client.PackageMeta(provider, version, target)
-}
-
-// LookupLegacyProviderNamespace is a special method available only on
-// RegistrySource which can deal with legacy provider addresses that contain
-// only a type and leave the namespace implied.
-//
-// It asks the registry at the given hostname to provide a default namespace
-// for the given provider type, which can be combined with the given hostname
-// and type name to produce a fully-qualified provider address.
-//
-// Not all unqualified type names can be resolved to a default namespace. If
-// the request fails, this method returns an error describing the failure.
-//
-// This method exists only to allow compatibility with unqualified names
-// in older configurations. New configurations should be written so as not to
-// depend on it, and this fallback mechanism will likely be removed altogether
-// in a future Terraform version.
-func (s *RegistrySource) LookupLegacyProviderNamespace(hostname svchost.Hostname, typeName string) (string, error) {
-	client, err := s.registryClient(hostname)
-	if err != nil {
-		return "", err
-	}
-	return client.LegacyProviderDefaultNamespace(typeName)
-}
-
-func (s *RegistrySource) registryClient(hostname svchost.Hostname) (*registryClient, error) {
-	host, err := s.services.Discover(hostname)
-	if err != nil {
-		return nil, ErrHostUnreachable{
-			Hostname: hostname,
-			Wrapped:  err,
-		}
-	}
-
-	url, err := host.ServiceURL("providers.v1")
-	switch err := err.(type) {
-	case nil:
-		// okay! We'll fall through and return below.
-	case *disco.ErrServiceNotProvided:
-		return nil, ErrHostNoProviders{
-			Hostname: hostname,
-		}
-	case *disco.ErrVersionNotSupported:
-		return nil, ErrHostNoProviders{
-			Hostname:        hostname,
-			HasOtherVersion: true,
-		}
-	default:
-		return nil, ErrHostUnreachable{
-			Hostname: hostname,
-			Wrapped:  err,
-		}
-	}
-
-	// Check if we have credentials configured for this hostname.
-	creds, err := s.services.CredentialsForHost(hostname)
-	if err != nil {
-		// This indicates that a credentials helper failed, which means we
-		// can't do anything better than just pass through the helper's
-		// own error message.
-		return nil, fmt.Errorf("failed to retrieve credentials for %s: %s", hostname, err)
-	}
-
-	return newRegistryClient(url, creds), nil
-}
-
-func (s *RegistrySource) ForDisplay(provider addrs.Provider) string {
-	return fmt.Sprintf("registry %s", provider.Hostname.ForDisplay())
-}
diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/source.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/source.go
deleted file mode 100644
index 7c1e30d5..00000000
--- a/vendor/github.com/hashicorp/terraform/internal/getproviders/source.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package getproviders
-
-import (
-	"github.com/hashicorp/terraform/addrs"
-)
-
-// A Source can query a particular source for information about providers
-// that are available to install.
-type Source interface {
-	AvailableVersions(provider addrs.Provider) (VersionList, error)
-	PackageMeta(provider addrs.Provider, version Version, target Platform) (PackageMeta, error)
-	ForDisplay(provider addrs.Provider) string
-}
diff --git a/vendor/github.com/hashicorp/terraform/internal/getproviders/types.go b/vendor/github.com/hashicorp/terraform/internal/getproviders/types.go
deleted file mode 100644
index cc416a42..00000000
--- a/vendor/github.com/hashicorp/terraform/internal/getproviders/types.go
+++ /dev/null
@@ -1,397 +0,0 @@
-package getproviders
-
-import (
-	"fmt"
-	"runtime"
-	"sort"
-	"strings"
-
-	"github.com/apparentlymart/go-versions/versions"
-	"github.com/apparentlymart/go-versions/versions/constraints"
-
-	"github.com/hashicorp/terraform/addrs"
-)
-
-// Version represents a particular single version of a provider.
-type Version = versions.Version
-
-// UnspecifiedVersion is the zero value of Version, representing the absence
-// of a version number.
-var UnspecifiedVersion Version = versions.Unspecified
-
-// VersionList represents a list of versions. It is a []Version with some
-// extra methods for convenient filtering.
-type VersionList = versions.List
-
-// VersionSet represents a set of versions, usually describing the acceptable
-// versions that can be selected under a particular version constraint provided
-// by the end-user.
-type VersionSet = versions.Set
-
-// VersionConstraints represents a set of version constraints, which can
-// define the membership of a VersionSet by exclusion.
-type VersionConstraints = constraints.IntersectionSpec
-
-// Requirements gathers together requirements for many different providers
-// into a single data structure, as a convenient way to represent the full
-// set of requirements for a particular configuration or state or both.
-//
-// If an entry in a Requirements has a zero-length VersionConstraints then
-// that indicates that the provider is required but that any version is
-// acceptable. That's different than a provider being absent from the map
-// altogether, which means that it is not required at all.
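A sketch of those merge semantics with stand-in types (the real key type is addrs.Provider and the real method is Requirements.Merge, defined just below):

package main

import "fmt"

// Provider and the string constraints stand in for addrs.Provider and
// VersionConstraints; the point is the append-based merge described above.
type Provider string
type Requirements map[Provider][]string

func (r Requirements) Merge(other Requirements) Requirements {
	ret := make(Requirements)
	for addr, cs := range r {
		ret[addr] = cs
	}
	for addr, cs := range other {
		ret[addr] = append(ret[addr], cs...)
	}
	return ret
}

func main() {
	a := Requirements{"registry.terraform.io/hashicorp/aws": {">= 3.0.0"}}
	b := Requirements{
		"registry.terraform.io/hashicorp/aws":  {"< 4.0.0"},
		"registry.terraform.io/hashicorp/null": nil, // required, any version acceptable
	}
	fmt.Println(a.Merge(b))
	// map[...aws:[>= 3.0.0 < 4.0.0] ...null:[]]
}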
-type Requirements map[addrs.Provider]VersionConstraints
-
-// Merge takes the requirements in the receiver and the requirements in the
-// other given value and produces a new set of requirements that combines
-// all of the requirements of both.
-//
-// The resulting requirements will permit only selections that both of the
-// source requirements would've allowed.
-func (r Requirements) Merge(other Requirements) Requirements {
-	ret := make(Requirements)
-	for addr, constraints := range r {
-		ret[addr] = constraints
-	}
-	for addr, constraints := range other {
-		ret[addr] = append(ret[addr], constraints...)
-	}
-	return ret
-}
-
-// Selections gathers together version selections for many different providers.
-//
-// This is the result of provider installation: a specific version selected
-// for each provider given in the requested Requirements, selected based on
-// the given version constraints.
-type Selections map[addrs.Provider]Version
-
-// ParseVersion parses a "semver"-style version string into a Version value,
-// which is the version syntax we use for provider versions.
-func ParseVersion(str string) (Version, error) {
-	return versions.ParseVersion(str)
-}
-
-// MustParseVersion is a variant of ParseVersion that panics if it encounters
-// an error while parsing.
-func MustParseVersion(str string) Version {
-	ret, err := ParseVersion(str)
-	if err != nil {
-		panic(err)
-	}
-	return ret
-}
-
-// ParseVersionConstraints parses a "Ruby-like" version constraint string
-// into a VersionConstraints value.
-func ParseVersionConstraints(str string) (VersionConstraints, error) {
-	return constraints.ParseRubyStyleMulti(str)
-}
-
-// MustParseVersionConstraints is a variant of ParseVersionConstraints that
-// panics if it encounters an error while parsing.
-func MustParseVersionConstraints(str string) VersionConstraints {
-	ret, err := ParseVersionConstraints(str)
-	if err != nil {
-		panic(err)
-	}
-	return ret
-}
-
-// MeetingConstraints returns a version set that contains all of the versions
-// that meet the given constraints, specified using the Spec type from the
-// constraints package.
-func MeetingConstraints(vc VersionConstraints) VersionSet {
-	return versions.MeetingConstraints(vc)
-}
-
-// Platform represents a target platform that a provider is or might be
-// available for.
-type Platform struct {
-	OS, Arch string
-}
-
-func (p Platform) String() string {
-	return p.OS + "_" + p.Arch
-}
-
-// LessThan returns true if the receiver should sort before the other given
-// Platform in an ordered list of platforms.
-//
-// The ordering is lexical first by OS and then by Architecture.
-// This ordering is primarily just to ensure that results of
-// functions in this package will be deterministic. The ordering is not
-// intended to have any semantic meaning and is subject to change in future.
-func (p Platform) LessThan(other Platform) bool {
-	switch {
-	case p.OS != other.OS:
-		return p.OS < other.OS
-	default:
-		return p.Arch < other.Arch
-	}
-}
-
-// ParsePlatform parses a string representation of a platform, like
-// "linux_amd64", or returns an error if the string is not valid.
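A standalone sketch of that parsing rule, together with the current-platform value described just below (local stand-ins, not this package's exported types):

package main

import (
	"fmt"
	"runtime"
	"strings"
)

type Platform struct{ OS, Arch string }

// parsePlatform reimplements the documented rule as a sketch: two words
// separated by the first underscore.
func parsePlatform(str string) (Platform, error) {
	underPos := strings.Index(str, "_")
	if underPos < 1 || underPos >= len(str)-2 {
		return Platform{}, fmt.Errorf("must be two words separated by an underscore")
	}
	return Platform{OS: str[:underPos], Arch: str[underPos+1:]}, nil
}

func main() {
	p, err := parsePlatform("linux_amd64")
	fmt.Println(p, err) // {linux amd64} <nil>

	// The equivalent of CurrentPlatform below.
	fmt.Println(Platform{OS: runtime.GOOS, Arch: runtime.GOARCH})
}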
-func ParsePlatform(str string) (Platform, error) {
-	underPos := strings.Index(str, "_")
-	if underPos < 1 || underPos >= len(str)-2 {
-		return Platform{}, fmt.Errorf("must be two words separated by an underscore")
-	}
-
-	os, arch := str[:underPos], str[underPos+1:]
-	if strings.ContainsAny(os, " \t\n\r") {
-		return Platform{}, fmt.Errorf("OS portion must not contain whitespace")
-	}
-	if strings.ContainsAny(arch, " \t\n\r") {
-		return Platform{}, fmt.Errorf("architecture portion must not contain whitespace")
-	}
-
-	return Platform{
-		OS:   os,
-		Arch: arch,
-	}, nil
-}
-
-// CurrentPlatform is the platform where the current program is running.
-//
-// If attempting to install providers for use on the same system where the
-// installation process is running, this is the right platform to use.
-var CurrentPlatform = Platform{
-	OS:   runtime.GOOS,
-	Arch: runtime.GOARCH,
-}
-
-// PackageMeta represents the metadata related to a particular downloadable
-// provider package targeting a single platform.
-//
-// Package getproviders does no signature verification or protocol version
-// compatibility checking of its own. A caller receiving a PackageMeta must
-// verify that it has a correct signature and supports a protocol version
-// accepted by the current version of Terraform before trying to use the
-// described package.
-type PackageMeta struct {
-	Provider addrs.Provider
-	Version  Version
-
-	ProtocolVersions VersionList
-	TargetPlatform   Platform
-
-	Filename string
-	Location PackageLocation
-
-	// Authentication, if non-nil, is a request from the source that produced
-	// this meta for verification of the target package after it has been
-	// retrieved from the indicated Location.
-	//
-	// Different sources will support different authentication strategies --
-	// or possibly no strategies at all -- depending on what metadata they
-	// have available to them, such as checksums provided out-of-band by the
-	// original package author, expected signing keys, etc.
-	//
-	// If Authentication is nil then no authentication is requested.
-	// This is likely appropriate only for packages that are already available
-	// on the local system.
-	Authentication PackageAuthentication
-}
-
-// LessThan returns true if the receiver should sort before the given other
-// PackageMeta in a sorted list of PackageMeta.
-//
-// Sorting preference is given first to the provider address, then to the
-// target platform, and then to the version number (using semver precedence).
-// Packages that differ only in semver build metadata have no defined
-// precedence and so will always return false.
-//
-// This ordering is primarily just to maximize the chance that results of
-// functions in this package will be deterministic. The ordering is not
-// intended to have any semantic meaning and is subject to change in future.
-func (m PackageMeta) LessThan(other PackageMeta) bool {
-	switch {
-	case m.Provider != other.Provider:
-		return m.Provider.LessThan(other.Provider)
-	case m.TargetPlatform != other.TargetPlatform:
-		return m.TargetPlatform.LessThan(other.TargetPlatform)
-	case m.Version != other.Version:
-		return m.Version.LessThan(other.Version)
-	default:
-		return false
-	}
-}
-
-// UnpackedDirectoryPath determines the path under the given base
-// directory where SearchLocalDirectory or the FilesystemMirrorSource would
-// expect to find an unpacked copy of the receiving PackageMeta.
-// -// The result always uses forward slashes as path separator, even on Windows, -// to produce a consistent result on all platforms. Windows accepts both -// direction of slash as long as each individual path string is self-consistent. -func (m PackageMeta) UnpackedDirectoryPath(baseDir string) string { - return UnpackedDirectoryPathForPackage(baseDir, m.Provider, m.Version, m.TargetPlatform) -} - -// PackedFilePath determines the path under the given base -// directory where SearchLocalDirectory or the FilesystemMirrorSource would -// expect to find packed copy (a .zip archive) of the receiving PackageMeta. -// -// The result always uses forward slashes as path separator, even on Windows, -// to produce a consistent result on all platforms. Windows accepts both -// direction of slash as long as each individual path string is self-consistent. -func (m PackageMeta) PackedFilePath(baseDir string) string { - return PackedFilePathForPackage(baseDir, m.Provider, m.Version, m.TargetPlatform) -} - -// PackageLocation represents a location where a provider distribution package -// can be obtained. A value of this type contains one of the following -// concrete types: PackageLocalArchive, PackageLocalDir, or PackageHTTPURL. -type PackageLocation interface { - packageLocation() - String() string -} - -// PackageLocalArchive is the location of a provider distribution archive file -// in the local filesystem. Its value is a local filesystem path using the -// syntax understood by Go's standard path/filepath package on the operating -// system where Terraform is running. -type PackageLocalArchive string - -func (p PackageLocalArchive) packageLocation() {} -func (p PackageLocalArchive) String() string { return string(p) } - -// PackageLocalDir is the location of a directory containing an unpacked -// provider distribution archive in the local filesystem. Its value is a local -// filesystem path using the syntax understood by Go's standard path/filepath -// package on the operating system where Terraform is running. -type PackageLocalDir string - -func (p PackageLocalDir) packageLocation() {} -func (p PackageLocalDir) String() string { return string(p) } - -// PackageHTTPURL is a provider package location accessible via HTTP. -// Its value is a URL string using either the http: scheme or the https: scheme. -type PackageHTTPURL string - -func (p PackageHTTPURL) packageLocation() {} -func (p PackageHTTPURL) String() string { return string(p) } - -// PackageMetaList is a list of PackageMeta. It's just []PackageMeta with -// some methods for convenient sorting and filtering. -type PackageMetaList []PackageMeta - -func (l PackageMetaList) Len() int { - return len(l) -} - -func (l PackageMetaList) Less(i, j int) bool { - return l[i].LessThan(l[j]) -} - -func (l PackageMetaList) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -// Sort performs an in-place, stable sort on the contents of the list, using -// the ordering given by method Less. This ordering is primarily to help -// encourage deterministic results from functions and does not have any -// semantic meaning. -func (l PackageMetaList) Sort() { - sort.Stable(l) -} - -// FilterPlatform constructs a new PackageMetaList that contains only the -// elements of the receiver that are for the given target platform. -// -// Pass CurrentPlatform to filter only for packages targeting the platform -// where this code is running. 
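UnpackedDirectoryPathForPackage and PackedFilePathForPackage are defined elsewhere in the package and don't appear in this diff. The sketch below assumes the conventional hostname/namespace/type/version/platform mirror layout, so treat the exact shape as an assumption for illustration:

package main

import (
	"fmt"
	"path"
)

// unpackedDirectoryPath sketches the mirror-directory convention. path.Join
// (not filepath.Join) keeps forward slashes on every OS, matching the doc
// comments above.
func unpackedDirectoryPath(baseDir, hostname, namespace, typeName, version, platform string) string {
	return path.Join(baseDir, hostname, namespace, typeName, version, platform)
}

func main() {
	fmt.Println(unpackedDirectoryPath(
		"terraform.d/plugins",
		"registry.terraform.io", "hashicorp", "aws",
		"3.38.0", "linux_amd64",
	))
	// terraform.d/plugins/registry.terraform.io/hashicorp/aws/3.38.0/linux_amd64
}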
-func (l PackageMetaList) FilterPlatform(target Platform) PackageMetaList {
-	var ret PackageMetaList
-	for _, m := range l {
-		if m.TargetPlatform == target {
-			ret = append(ret, m)
-		}
-	}
-	return ret
-}
-
-// FilterProviderExactVersion constructs a new PackageMetaList that contains
-// only the elements of the receiver that relate to the given provider address
-// and exact version.
-//
-// The version matching for this function is exact, including matching on
-// semver build metadata, because it's intended for handling a single exact
-// version selected by the caller from a set of available versions.
-func (l PackageMetaList) FilterProviderExactVersion(provider addrs.Provider, version Version) PackageMetaList {
-	var ret PackageMetaList
-	for _, m := range l {
-		if m.Provider == provider && m.Version == version {
-			ret = append(ret, m)
-		}
-	}
-	return ret
-}
-
-// FilterProviderPlatformExactVersion is a combination of both
-// FilterPlatform and FilterProviderExactVersion that filters by all three
-// criteria at once.
-func (l PackageMetaList) FilterProviderPlatformExactVersion(provider addrs.Provider, platform Platform, version Version) PackageMetaList {
-	var ret PackageMetaList
-	for _, m := range l {
-		if m.Provider == provider && m.Version == version && m.TargetPlatform == platform {
-			ret = append(ret, m)
-		}
-	}
-	return ret
-}
-
-// VersionConstraintsString returns a UI-oriented string representation of
-// a VersionConstraints value.
-func VersionConstraintsString(spec VersionConstraints) string {
-	// (we have our own function for this because the upstream versions
-	// library prefers to use npm/cargo-style constraint syntax, but
-	// Terraform prefers Ruby-like. Maybe we can upstream a "RubyLikeString"
-	// function to do this later, but having this in here avoids blocking on
-	// that and this is the sort of thing that is unlikely to need ongoing
-	// maintenance because the version constraint syntax is unlikely to change.)
-
-	var b strings.Builder
-	for i, sel := range spec {
-		if i > 0 {
-			b.WriteString(", ")
-		}
-		switch sel.Operator {
-		case constraints.OpGreaterThan:
-			b.WriteString("> ")
-		case constraints.OpLessThan:
-			b.WriteString("< ")
-		case constraints.OpGreaterThanOrEqual:
-			b.WriteString(">= ")
-		case constraints.OpGreaterThanOrEqualPatchOnly, constraints.OpGreaterThanOrEqualMinorOnly:
-			// These two differ in how the version is written, not in the symbol.
-			b.WriteString("~> ")
-		case constraints.OpLessThanOrEqual:
-			b.WriteString("<= ")
-		case constraints.OpEqual:
-			b.WriteString("")
-		case constraints.OpNotEqual:
-			b.WriteString("!= ")
-		default:
-			// The above covers all of the operators we support during
-			// parsing, so we should not get here.
-			b.WriteString("??? ")
-		}
-
-		if sel.Operator == constraints.OpGreaterThanOrEqualMinorOnly {
-			// The minor-pessimistic syntax uses only two version components.
- fmt.Fprintf(&b, "%s.%s", sel.Boundary.Major, sel.Boundary.Minor) - } else { - fmt.Fprintf(&b, "%s.%s.%s", sel.Boundary.Major, sel.Boundary.Minor, sel.Boundary.Patch) - } - if sel.Boundary.Prerelease != "" { - b.WriteString("-" + sel.Boundary.Prerelease) - } - if sel.Boundary.Metadata != "" { - b.WriteString("+" + sel.Boundary.Metadata) - } - } - return b.String() -} diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go deleted file mode 100644 index 8f89909c..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package blocktoattr includes some helper functions that can perform -// preprocessing on a HCL body where a configschema.Block schema is available -// in order to allow list and set attributes defined in the schema to be -// optionally written by the user as block syntax. -package blocktoattr diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go deleted file mode 100644 index 0af708ec..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go +++ /dev/null @@ -1,187 +0,0 @@ -package blocktoattr - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// FixUpBlockAttrs takes a raw HCL body and adds some additional normalization -// functionality to allow attributes that are specified as having list or set -// type in the schema to be written with HCL block syntax as multiple nested -// blocks with the attribute name as the block type. -// -// This partially restores some of the block/attribute confusion from HCL 1 -// so that existing patterns that depended on that confusion can continue to -// be used in the short term while we settle on a longer-term strategy. -// -// Most of the fixup work is actually done when the returned body is -// subsequently decoded, so while FixUpBlockAttrs always succeeds, the eventual -// decode of the body might not, if the content of the body is so ambiguous -// that there's no safe way to map it to the schema. -func FixUpBlockAttrs(body hcl.Body, schema *configschema.Block) hcl.Body { - // The schema should never be nil, but in practice it seems to be sometimes - // in the presence of poorly-configured test mocks, so we'll be robust - // by synthesizing an empty one. - if schema == nil { - schema = &configschema.Block{} - } - - return &fixupBody{ - original: body, - schema: schema, - names: ambiguousNames(schema), - } -} - -type fixupBody struct { - original hcl.Body - schema *configschema.Block - names map[string]struct{} -} - -// Content decodes content from the body. The given schema must be the lower-level -// representation of the same schema that was previously passed to FixUpBlockAttrs, -// or else the result is undefined. 
-func (b *fixupBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
-	schema = b.effectiveSchema(schema)
-	content, diags := b.original.Content(schema)
-	return b.fixupContent(content), diags
-}
-
-func (b *fixupBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
-	schema = b.effectiveSchema(schema)
-	content, remain, diags := b.original.PartialContent(schema)
-	remain = &fixupBody{
-		original: remain,
-		schema:   b.schema,
-		names:    b.names,
-	}
-	return b.fixupContent(content), remain, diags
-}
-
-func (b *fixupBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
-	// FixUpBlockAttrs is not intended to be used in situations where we'd use
-	// JustAttributes, so we just pass this through verbatim to complete our
-	// implementation of hcl.Body.
-	return b.original.JustAttributes()
-}
-
-func (b *fixupBody) MissingItemRange() hcl.Range {
-	return b.original.MissingItemRange()
-}
-
-// effectiveSchema produces a derived *hcl.BodySchema by sniffing the body's
-// content to determine whether the author has used attribute or block syntax
-// for each of the ambiguous attributes where both are permitted.
-//
-// The resulting schema will always contain all of the same names that are
-// in the given schema, but some attribute schemas may instead be replaced by
-// block header schemas.
-func (b *fixupBody) effectiveSchema(given *hcl.BodySchema) *hcl.BodySchema {
-	return effectiveSchema(given, b.original, b.names, true)
-}
-
-func (b *fixupBody) fixupContent(content *hcl.BodyContent) *hcl.BodyContent {
-	var ret hcl.BodyContent
-	ret.Attributes = make(hcl.Attributes)
-	for name, attr := range content.Attributes {
-		ret.Attributes[name] = attr
-	}
-	blockAttrVals := make(map[string][]*hcl.Block)
-	for _, block := range content.Blocks {
-		if _, exists := b.names[block.Type]; exists {
-			// If we get here then we've found a block type whose instances need
-			// to be re-interpreted as a list-of-objects attribute. We'll gather
-			// those up and fix them up below.
-			blockAttrVals[block.Type] = append(blockAttrVals[block.Type], block)
-			continue
-		}
-
-		// We need to now re-wrap our inner body so it will be subject to the
-		// same attribute-as-block fixup when recursively decoded.
-		retBlock := *block // shallow copy
-		if blockS, ok := b.schema.BlockTypes[block.Type]; ok {
-			// Would be weird if not ok, but we'll allow it for robustness; body just won't be fixed up, then
-			retBlock.Body = FixUpBlockAttrs(retBlock.Body, &blockS.Block)
-		}
-
-		ret.Blocks = append(ret.Blocks, &retBlock)
-	}
-	// Now we'll install synthetic attributes for each of our fixups. We can't
-	// do this exactly because HCL's information model expects an attribute
-	// to be a single decl but we have multiple separate blocks. We'll
-	// approximate things, then, by using only our first block for the source
-	// location information. (We are guaranteed at least one by the above logic.)
- for name, blocks := range blockAttrVals { - ret.Attributes[name] = &hcl.Attribute{ - Name: name, - Expr: &fixupBlocksExpr{ - blocks: blocks, - ety: b.schema.Attributes[name].Type.ElementType(), - }, - - Range: blocks[0].DefRange, - NameRange: blocks[0].TypeRange, - } - } - return &ret -} - -type fixupBlocksExpr struct { - blocks hcl.Blocks - ety cty.Type -} - -func (e *fixupBlocksExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - // In order to produce a suitable value for our expression we need to - // now decode the whole descendent block structure under each of our block - // bodies. - // - // That requires us to do something rather strange: we must construct a - // synthetic block type schema derived from the element type of the - // attribute, thus inverting our usual direction of lowering a schema - // into an implied type. Because a type is less detailed than a schema, - // the result is imprecise and in particular will just consider all - // the attributes to be optional and let the provider eventually decide - // whether to return errors if they turn out to be null when required. - schema := SchemaForCtyElementType(e.ety) // this schema's ImpliedType will match e.ety - spec := schema.DecoderSpec() - - vals := make([]cty.Value, len(e.blocks)) - var diags hcl.Diagnostics - for i, block := range e.blocks { - body := FixUpBlockAttrs(block.Body, schema) - val, blockDiags := hcldec.Decode(body, spec, ctx) - diags = append(diags, blockDiags...) - if val == cty.NilVal { - val = cty.UnknownVal(e.ety) - } - vals[i] = val - } - if len(vals) == 0 { - return cty.ListValEmpty(e.ety), diags - } - return cty.ListVal(vals), diags -} - -func (e *fixupBlocksExpr) Variables() []hcl.Traversal { - var ret []hcl.Traversal - schema := SchemaForCtyElementType(e.ety) - spec := schema.DecoderSpec() - for _, block := range e.blocks { - ret = append(ret, hcldec.Variables(block.Body, spec)...) - } - return ret -} - -func (e *fixupBlocksExpr) Range() hcl.Range { - // This is not really an appropriate range for the expression but it's - // the best we can do from here. - return e.blocks[0].DefRange -} - -func (e *fixupBlocksExpr) StartRange() hcl.Range { - return e.blocks[0].DefRange -} diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go deleted file mode 100644 index 31e010cc..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go +++ /dev/null @@ -1,146 +0,0 @@ -package blocktoattr - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -func ambiguousNames(schema *configschema.Block) map[string]struct{} { - if schema == nil { - return nil - } - ambiguousNames := make(map[string]struct{}) - for name, attrS := range schema.Attributes { - aty := attrS.Type - if (aty.IsListType() || aty.IsSetType()) && aty.ElementType().IsObjectType() { - ambiguousNames[name] = struct{}{} - } - } - return ambiguousNames -} - -func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[string]struct{}, dynamicExpanded bool) *hcl.BodySchema { - ret := &hcl.BodySchema{} - - appearsAsBlock := make(map[string]struct{}) - { - // We'll construct some throwaway schemas here just to probe for - // whether each of our ambiguous names seems to be being used as - // an attribute or a block. 
We need to check both because in JSON - // syntax we rely on the schema to decide between attribute or block - // interpretation and so JSON will always answer yes to both of - // these questions and we want to prefer the attribute interpretation - // in that case. - var probeSchema hcl.BodySchema - - for name := range ambiguousNames { - probeSchema = hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: name, - }, - }, - } - content, _, _ := body.PartialContent(&probeSchema) - if _, exists := content.Attributes[name]; exists { - // Can decode as an attribute, so we'll go with that. - continue - } - probeSchema = hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: name, - }, - }, - } - content, _, _ = body.PartialContent(&probeSchema) - if len(content.Blocks) > 0 || dynamicExpanded { - // A dynamic block with an empty iterator returns nothing. - // If there's no attribute and we have either a block or a - // dynamic expansion, we need to rewrite this one as a - // block for a successful result. - appearsAsBlock[name] = struct{}{} - } - } - if !dynamicExpanded { - // If we're deciding for a context where dynamic blocks haven't - // been expanded yet then we need to probe for those too. - probeSchema = hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "dynamic", - LabelNames: []string{"type"}, - }, - }, - } - content, _, _ := body.PartialContent(&probeSchema) - for _, block := range content.Blocks { - if _, exists := ambiguousNames[block.Labels[0]]; exists { - appearsAsBlock[block.Labels[0]] = struct{}{} - } - } - } - } - - for _, attrS := range given.Attributes { - if _, exists := appearsAsBlock[attrS.Name]; exists { - ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ - Type: attrS.Name, - }) - } else { - ret.Attributes = append(ret.Attributes, attrS) - } - } - - // Anything that is specified as a block type in the input schema remains - // that way by just passing through verbatim. - ret.Blocks = append(ret.Blocks, given.Blocks...) - - return ret -} - -// SchemaForCtyElementType converts a cty object type into an -// approximately-equivalent configschema.Block representing the element of -// a list or set. If the given type is not an object type then this -// function will panic. -func SchemaForCtyElementType(ty cty.Type) *configschema.Block { - atys := ty.AttributeTypes() - ret := &configschema.Block{ - Attributes: make(map[string]*configschema.Attribute, len(atys)), - } - for name, aty := range atys { - ret.Attributes[name] = &configschema.Attribute{ - Type: aty, - Optional: true, - } - } - return ret -} - -// SchemaForCtyContainerType converts a cty list-of-object or set-of-object type -// into an approximately-equivalent configschema.NestedBlock. If the given type -// is not of the expected kind then this function will panic. -func SchemaForCtyContainerType(ty cty.Type) *configschema.NestedBlock { - var nesting configschema.NestingMode - switch { - case ty.IsListType(): - nesting = configschema.NestingList - case ty.IsSetType(): - nesting = configschema.NestingSet - default: - panic("unsuitable type") - } - nested := SchemaForCtyElementType(ty.ElementType()) - return &configschema.NestedBlock{ - Nesting: nesting, - Block: *nested, - } -} - -// TypeCanBeBlocks returns true if the given type is a list-of-object or -// set-of-object type, and would thus be subject to the blocktoattr fixup -// if used as an attribute type. 
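A minimal sketch (not part of the vendored code) of how these helpers compose, assuming the package is importable at its upstream path github.com/hashicorp/terraform/lang/blocktoattr: a list-of-objects attribute type qualifies for the fixup, and its element type lowers to a block schema whose attributes are all optional.

	package main

	import (
		"fmt"

		"github.com/hashicorp/terraform/lang/blocktoattr"
		"github.com/zclconf/go-cty/cty"
	)

	func main() {
		// The kind of attribute type a module author writes as
		// list(object({...})) in a type constraint.
		ty := cty.List(cty.Object(map[string]cty.Type{
			"action": cty.String,
			"port":   cty.Number,
		}))

		fmt.Println(blocktoattr.TypeCanBeBlocks(ty)) // true: list-of-object qualifies

		// Lowering a type into a schema loses detail, so every synthesized
		// attribute comes back Optional.
		block := blocktoattr.SchemaForCtyElementType(ty.ElementType())
		fmt.Println(block.Attributes["port"].Optional) // true
	}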
-func TypeCanBeBlocks(ty cty.Type) bool { - return (ty.IsListType() || ty.IsSetType()) && ty.ElementType().IsObjectType() -} diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go deleted file mode 100644 index ae5c609d..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go +++ /dev/null @@ -1,45 +0,0 @@ -package blocktoattr - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/dynblock" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/configs/configschema" -) - -// ExpandedVariables finds all of the global variables referenced in the -// given body with the given schema while taking into account the possibilities -// both of "dynamic" blocks being expanded and the possibility of certain -// attributes being written instead as nested blocks as allowed by the -// FixUpBlockAttrs function. -// -// This function exists to allow variables to be analyzed prior to dynamic -// block expansion while also dealing with the fact that dynamic block expansion -// might in turn produce nested blocks that are subject to FixUpBlockAttrs. -// -// This is intended as a drop-in replacement for dynblock.VariablesHCLDec, -// which is itself a drop-in replacement for hcldec.Variables. -func ExpandedVariables(body hcl.Body, schema *configschema.Block) []hcl.Traversal { - rootNode := dynblock.WalkVariables(body) - return walkVariables(rootNode, body, schema) -} - -func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal { - givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec()) - ambiguousNames := ambiguousNames(schema) - effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false) - vars, children := node.Visit(effectiveRawSchema) - - for _, child := range children { - if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists { - vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...) - } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists && attrS.Type.IsCollectionType() && attrS.Type.ElementType().IsObjectType() { - // ☝️Check for collection type before element type, because if this is a mis-placed reference, - // a panic here will prevent other useful diags from being elevated to show the user what to fix - synthSchema := SchemaForCtyElementType(attrS.Type.ElementType()) - vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...) - } - } - - return vars -} diff --git a/vendor/github.com/hashicorp/terraform/lang/data.go b/vendor/github.com/hashicorp/terraform/lang/data.go deleted file mode 100644 index a47a2a32..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/data.go +++ /dev/null @@ -1,33 +0,0 @@ -package lang - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// Data is an interface whose implementations can provide cty.Value -// representations of objects identified by referenceable addresses from -// the addrs package. -// -// This interface will grow each time a new type of reference is added, and so -// implementations outside of the Terraform codebases are not advised. -// -// Each method returns a suitable value and optionally some diagnostics. 
If the -// returned diagnostics contains errors then the type of the returned value is -// used to construct an unknown value of the same type which is then used in -// place of the requested object so that type checking can still proceed. In -// cases where it's not possible to even determine a suitable result type, -// cty.DynamicVal is returned along with errors describing the problem. -type Data interface { - StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics - - GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetForEachAttr(addrs.ForEachAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetResource(addrs.Resource, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetModule(addrs.ModuleCall, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetPathAttr(addrs.PathAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetTerraformAttr(addrs.TerraformAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetInputVariable(addrs.InputVariable, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/doc.go b/vendor/github.com/hashicorp/terraform/lang/doc.go deleted file mode 100644 index af5c5cac..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package lang deals with the runtime aspects of Terraform's configuration -// language, with concerns such as expression evaluation. It is closely related -// to sibling package "configs", which is responsible for configuration -// parsing and static validation. -package lang diff --git a/vendor/github.com/hashicorp/terraform/lang/eval.go b/vendor/github.com/hashicorp/terraform/lang/eval.go deleted file mode 100644 index db5a15a2..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/eval.go +++ /dev/null @@ -1,365 +0,0 @@ -package lang - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/dynblock" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/lang/blocktoattr" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// ExpandBlock expands any "dynamic" blocks present in the given body. The -// result is a body with those blocks expanded, ready to be evaluated with -// EvalBlock. -// -// If the returned diagnostics contains errors then the result may be -// incomplete or invalid. -func (s *Scope) ExpandBlock(body hcl.Body, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) { - spec := schema.DecoderSpec() - - traversals := dynblock.ExpandVariablesHCLDec(body, spec) - refs, diags := References(traversals) - - ctx, ctxDiags := s.EvalContext(refs) - diags = diags.Append(ctxDiags) - - return dynblock.Expand(body, ctx), diags -} - -// EvalBlock evaluates the given body using the given block schema and returns -// a cty object value representing its contents. The type of the result conforms -// to the implied type of the given schema. -// -// This function does not automatically expand "dynamic" blocks within the -// body. If that is desired, first call the ExpandBlock method to obtain -// an expanded body to pass to this method. 
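As a hedged illustration of the intended call order (assuming a fully configured *lang.Scope whose Data field is wired to a concrete implementation; Scope's definition is outside this hunk), dynamic-block expansion happens first and its result feeds EvalBlock:

	package example

	import (
		"github.com/hashicorp/hcl/v2"
		"github.com/hashicorp/terraform/configs/configschema"
		"github.com/hashicorp/terraform/lang"
		"github.com/hashicorp/terraform/tfdiags"
		"github.com/zclconf/go-cty/cty"
	)

	// decodeBlock expands "dynamic" blocks and then evaluates the body to a
	// single cty object, accumulating diagnostics from both steps.
	func decodeBlock(s *lang.Scope, body hcl.Body, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) {
		expanded, diags := s.ExpandBlock(body, schema)
		val, evalDiags := s.EvalBlock(expanded, schema)
		diags = diags.Append(evalDiags)
		return val, diags
	}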
-// -// If the returned diagnostics contains errors then the result may be -// incomplete or invalid. -func (s *Scope) EvalBlock(body hcl.Body, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { - spec := schema.DecoderSpec() - - refs, diags := ReferencesInBlock(body, schema) - - ctx, ctxDiags := s.EvalContext(refs) - diags = diags.Append(ctxDiags) - if diags.HasErrors() { - // We'll stop early if we found problems in the references, because - // it's likely evaluation will produce redundant copies of the same errors. - return cty.UnknownVal(schema.ImpliedType()), diags - } - - // HACK: In order to remain compatible with some assumptions made in - // Terraform v0.11 and earlier about the approximate equivalence of - // attribute vs. block syntax, we do a just-in-time fixup here to allow - // any attribute in the schema that has a list-of-objects or set-of-objects - // kind to potentially be populated instead by one or more nested blocks - // whose type is the attribute name. - body = blocktoattr.FixUpBlockAttrs(body, schema) - - val, evalDiags := hcldec.Decode(body, spec, ctx) - diags = diags.Append(evalDiags) - - return val, diags -} - -// EvalExpr evaluates a single expression in the receiving context and returns -// the resulting value. The value will be converted to the given type before -// it is returned if possible, or else an error diagnostic will be produced -// describing the conversion error. -// -// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion -// and just obtain the returned value directly. -// -// If the returned diagnostics contains errors then the result may be -// incomplete, but will always be of the requested type. -func (s *Scope) EvalExpr(expr hcl.Expression, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { - refs, diags := ReferencesInExpr(expr) - - ctx, ctxDiags := s.EvalContext(refs) - diags = diags.Append(ctxDiags) - if diags.HasErrors() { - // We'll stop early if we found problems in the references, because - // it's likely evaluation will produce redundant copies of the same errors. - return cty.UnknownVal(wantType), diags - } - - val, evalDiags := expr.Value(ctx) - diags = diags.Append(evalDiags) - - if wantType != cty.DynamicPseudoType { - var convErr error - val, convErr = convert.Convert(val, wantType) - if convErr != nil { - val = cty.UnknownVal(wantType) - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect value type", - Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), - Subject: expr.Range().Ptr(), - }) - } - } - - return val, diags -} - -// EvalReference evaluates the given reference in the receiving scope and -// returns the resulting value. The value will be converted to the given type before -// it is returned if possible, or else an error diagnostic will be produced -// describing the conversion error. -// -// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion -// and just obtain the returned value directly. -// -// If the returned diagnostics contains errors then the result may be -// incomplete, but will always be of the requested type. -func (s *Scope) EvalReference(ref *addrs.Reference, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // We cheat a bit here and just build an EvalContext for our requested - // reference with the "self" address overridden, and then pull the "self" - // result out of it to return. 
- ctx, ctxDiags := s.evalContext([]*addrs.Reference{ref}, ref.Subject) - diags = diags.Append(ctxDiags) - val := ctx.Variables["self"] - if val == cty.NilVal { - val = cty.DynamicVal - } - - var convErr error - val, convErr = convert.Convert(val, wantType) - if convErr != nil { - val = cty.UnknownVal(wantType) - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect value type", - Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - } - - return val, diags -} - -// EvalContext constructs a HCL expression evaluation context whose variable -// scope contains sufficient values to satisfy the given set of references. -// -// Most callers should prefer to use the evaluation helper methods that -// this type offers, but this is here for less common situations where the -// caller will handle the evaluation calls itself. -func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) { - return s.evalContext(refs, s.SelfAddr) -} - -func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceable) (*hcl.EvalContext, tfdiags.Diagnostics) { - if s == nil { - panic("attempt to construct EvalContext for nil Scope") - } - - var diags tfdiags.Diagnostics - vals := make(map[string]cty.Value) - funcs := s.Functions() - ctx := &hcl.EvalContext{ - Variables: vals, - Functions: funcs, - } - - if len(refs) == 0 { - // Easy path for common case where there are no references at all. - return ctx, diags - } - - // First we'll do static validation of the references. This catches things - // early that might otherwise not get caught due to unknown values being - // present in the scope during planning. - if staticDiags := s.Data.StaticValidateReferences(refs, selfAddr); staticDiags.HasErrors() { - diags = diags.Append(staticDiags) - return ctx, diags - } - - // The reference set we are given has not been de-duped, and so there can - // be redundant requests in it for two reasons: - // - The same item is referenced multiple times - // - Both an item and that item's container are separately referenced. - // We will still visit every reference here and ask our data source for - // it, since that allows us to gather a full set of any errors and - // warnings, but once we've gathered all the data we'll then skip anything - // that's redundant in the process of populating our values map. - dataResources := map[string]map[string]cty.Value{} - managedResources := map[string]map[string]cty.Value{} - wholeModules := map[string]cty.Value{} - inputVariables := map[string]cty.Value{} - localValues := map[string]cty.Value{} - pathAttrs := map[string]cty.Value{} - terraformAttrs := map[string]cty.Value{} - countAttrs := map[string]cty.Value{} - forEachAttrs := map[string]cty.Value{} - var self cty.Value - - for _, ref := range refs { - rng := ref.SourceRange - - rawSubj := ref.Subject - if rawSubj == addrs.Self { - if selfAddr == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "self" reference`, - // This detail message mentions some current practice that - // this codepath doesn't really "know about". If the "self" - // object starts being supported in more contexts later then - // we'll need to adjust this message. - Detail: `The "self" object is not available in this context. 
This object can be used only in resource provisioner and connection blocks.`, - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - continue - } - - if selfAddr == addrs.Self { - // Programming error: the self address cannot alias itself. - panic("scope SelfAddr attempting to alias itself") - } - - // self can only be used within a resource instance - subj := selfAddr.(addrs.ResourceInstance) - - val, valDiags := normalizeRefValue(s.Data.GetResource(subj.ContainingResource(), rng)) - - diags = diags.Append(valDiags) - - // Self is an exception in that it must always resolve to a - // particular instance. We will still insert the full resource into - // the context below. - var hclDiags hcl.Diagnostics - // We should always have a valid self index by this point, but in - // the case of an error, self may end up as a cty.DynamicValue. - switch k := subj.Key.(type) { - case addrs.IntKey: - self, hclDiags = hcl.Index(val, cty.NumberIntVal(int64(k)), ref.SourceRange.ToHCL().Ptr()) - diags.Append(hclDiags) - case addrs.StringKey: - self, hclDiags = hcl.Index(val, cty.StringVal(string(k)), ref.SourceRange.ToHCL().Ptr()) - diags.Append(hclDiags) - default: - self = val - } - continue - } - - // This type switch must cover all of the "Referenceable" implementations - // in package addrs, however we are removing the possibility of - // Instances beforehand. - switch addr := rawSubj.(type) { - case addrs.ResourceInstance: - rawSubj = addr.ContainingResource() - case addrs.ModuleCallInstance: - rawSubj = addr.Call - case addrs.AbsModuleCallOutput: - rawSubj = addr.Call.Call - } - - switch subj := rawSubj.(type) { - case addrs.Resource: - var into map[string]map[string]cty.Value - switch subj.Mode { - case addrs.ManagedResourceMode: - into = managedResources - case addrs.DataResourceMode: - into = dataResources - default: - panic(fmt.Errorf("unsupported ResourceMode %s", subj.Mode)) - } - - val, valDiags := normalizeRefValue(s.Data.GetResource(subj, rng)) - diags = diags.Append(valDiags) - - r := subj - if into[r.Type] == nil { - into[r.Type] = make(map[string]cty.Value) - } - into[r.Type][r.Name] = val - - case addrs.ModuleCall: - val, valDiags := normalizeRefValue(s.Data.GetModule(subj, rng)) - diags = diags.Append(valDiags) - wholeModules[subj.Name] = val - - case addrs.InputVariable: - val, valDiags := normalizeRefValue(s.Data.GetInputVariable(subj, rng)) - diags = diags.Append(valDiags) - inputVariables[subj.Name] = val - - case addrs.LocalValue: - val, valDiags := normalizeRefValue(s.Data.GetLocalValue(subj, rng)) - diags = diags.Append(valDiags) - localValues[subj.Name] = val - - case addrs.PathAttr: - val, valDiags := normalizeRefValue(s.Data.GetPathAttr(subj, rng)) - diags = diags.Append(valDiags) - pathAttrs[subj.Name] = val - - case addrs.TerraformAttr: - val, valDiags := normalizeRefValue(s.Data.GetTerraformAttr(subj, rng)) - diags = diags.Append(valDiags) - terraformAttrs[subj.Name] = val - - case addrs.CountAttr: - val, valDiags := normalizeRefValue(s.Data.GetCountAttr(subj, rng)) - diags = diags.Append(valDiags) - countAttrs[subj.Name] = val - - case addrs.ForEachAttr: - val, valDiags := normalizeRefValue(s.Data.GetForEachAttr(subj, rng)) - diags = diags.Append(valDiags) - forEachAttrs[subj.Name] = val - - default: - // Should never happen - panic(fmt.Errorf("Scope.buildEvalContext cannot handle address type %T", rawSubj)) - } - } - - for k, v := range buildResourceObjects(managedResources) { - vals[k] = v - } - vals["data"] = cty.ObjectVal(buildResourceObjects(dataResources)) - 
vals["module"] = cty.ObjectVal(wholeModules) - vals["var"] = cty.ObjectVal(inputVariables) - vals["local"] = cty.ObjectVal(localValues) - vals["path"] = cty.ObjectVal(pathAttrs) - vals["terraform"] = cty.ObjectVal(terraformAttrs) - vals["count"] = cty.ObjectVal(countAttrs) - vals["each"] = cty.ObjectVal(forEachAttrs) - if self != cty.NilVal { - vals["self"] = self - } - - return ctx, diags -} - -func buildResourceObjects(resources map[string]map[string]cty.Value) map[string]cty.Value { - vals := make(map[string]cty.Value) - for typeName, nameVals := range resources { - vals[typeName] = cty.ObjectVal(nameVals) - } - return vals -} - -func normalizeRefValue(val cty.Value, diags tfdiags.Diagnostics) (cty.Value, tfdiags.Diagnostics) { - if diags.HasErrors() { - // If there are errors then we will force an unknown result so that - // we can still evaluate and catch type errors but we'll avoid - // producing redundant re-statements of the same errors we've already - // dealt with here. - return cty.UnknownVal(val.Type()), diags - } - return val, diags -} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go b/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go deleted file mode 100644 index 8c075148..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go +++ /dev/null @@ -1,218 +0,0 @@ -package funcs - -import ( - "fmt" - "net" - - "github.com/apparentlymart/go-cidr/cidr" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/gocty" -) - -// CidrHostFunc contructs a function that calculates a full host IP address -// within a given IP network address prefix. -var CidrHostFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "prefix", - Type: cty.String, - }, - { - Name: "hostnum", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var hostNum int - if err := gocty.FromCtyValue(args[1], &hostNum); err != nil { - return cty.UnknownVal(cty.String), err - } - _, network, err := net.ParseCIDR(args[0].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) - } - - ip, err := cidr.Host(network, hostNum) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.StringVal(ip.String()), nil - }, -}) - -// CidrNetmaskFunc contructs a function that converts an IPv4 address prefix given -// in CIDR notation into a subnet mask address. -var CidrNetmaskFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "prefix", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - _, network, err := net.ParseCIDR(args[0].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) - } - - return cty.StringVal(net.IP(network.Mask).String()), nil - }, -}) - -// CidrSubnetFunc contructs a function that calculates a subnet address within -// a given IP network address prefix. 
-var CidrSubnetFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "prefix", - Type: cty.String, - }, - { - Name: "newbits", - Type: cty.Number, - }, - { - Name: "netnum", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var newbits int - if err := gocty.FromCtyValue(args[1], &newbits); err != nil { - return cty.UnknownVal(cty.String), err - } - var netnum int - if err := gocty.FromCtyValue(args[2], &netnum); err != nil { - return cty.UnknownVal(cty.String), err - } - - _, network, err := net.ParseCIDR(args[0].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) - } - - // For portability with 32-bit systems where the subnet number - // will be a 32-bit int, we only allow extension of 32 bits in - // one call even if we're running on a 64-bit machine. - // (Of course, this is significant only for IPv6.) - if newbits > 32 { - return cty.UnknownVal(cty.String), fmt.Errorf("may not extend prefix by more than 32 bits") - } - - newNetwork, err := cidr.Subnet(network, newbits, netnum) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.StringVal(newNetwork.String()), nil - }, -}) - -// CidrSubnetsFunc is similar to CidrSubnetFunc but calculates many consecutive -// subnet addresses at once, rather than just a single subnet extension. -var CidrSubnetsFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "prefix", - Type: cty.String, - }, - }, - VarParam: &function.Parameter{ - Name: "newbits", - Type: cty.Number, - }, - Type: function.StaticReturnType(cty.List(cty.String)), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - _, network, err := net.ParseCIDR(args[0].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "invalid CIDR expression: %s", err) - } - startPrefixLen, _ := network.Mask.Size() - - prefixLengthArgs := args[1:] - if len(prefixLengthArgs) == 0 { - return cty.ListValEmpty(cty.String), nil - } - - var firstLength int - if err := gocty.FromCtyValue(prefixLengthArgs[0], &firstLength); err != nil { - return cty.UnknownVal(cty.String), function.NewArgError(1, err) - } - firstLength += startPrefixLen - - retVals := make([]cty.Value, len(prefixLengthArgs)) - - current, _ := cidr.PreviousSubnet(network, firstLength) - for i, lengthArg := range prefixLengthArgs { - var length int - if err := gocty.FromCtyValue(lengthArg, &length); err != nil { - return cty.UnknownVal(cty.String), function.NewArgError(i+1, err) - } - - if length < 1 { - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "must extend prefix by at least one bit") - } - // For portability with 32-bit systems where the subnet number - // will be a 32-bit int, we only allow extension of 32 bits in - // one call even if we're running on a 64-bit machine. - // (Of course, this is significant only for IPv6.) 
- if length > 32 { - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "may not extend prefix by more than 32 bits") - } - length += startPrefixLen - if length > (len(network.IP) * 8) { - protocol := "IP" - switch len(network.IP) * 8 { - case 32: - protocol = "IPv4" - case 128: - protocol = "IPv6" - } - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "would extend prefix to %d bits, which is too long for an %s address", length, protocol) - } - - next, rollover := cidr.NextSubnet(current, length) - if rollover || !network.Contains(next.IP) { - // If we run out of suffix bits in the base CIDR prefix then - // NextSubnet will start incrementing the prefix bits, which - // we don't allow because it would then allocate addresses - // outside of the caller's given prefix. - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "not enough remaining address space for a subnet with a prefix of %d bits after %s", length, current.String()) - } - - current = next - retVals[i] = cty.StringVal(current.String()) - } - - return cty.ListVal(retVals), nil - }, -}) - -// CidrHost calculates a full host IP address within a given IP network address prefix. -func CidrHost(prefix, hostnum cty.Value) (cty.Value, error) { - return CidrHostFunc.Call([]cty.Value{prefix, hostnum}) -} - -// CidrNetmask converts an IPv4 address prefix given in CIDR notation into a subnet mask address. -func CidrNetmask(prefix cty.Value) (cty.Value, error) { - return CidrNetmaskFunc.Call([]cty.Value{prefix}) -} - -// CidrSubnet calculates a subnet address within a given IP network address prefix. -func CidrSubnet(prefix, newbits, netnum cty.Value) (cty.Value, error) { - return CidrSubnetFunc.Call([]cty.Value{prefix, newbits, netnum}) -} - -// CidrSubnets calculates a sequence of consecutive subnet prefixes that may -// be of different prefix lengths under a common base prefix. 
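A worked example of the prefix arithmetic above, using the wrapper functions defined below (a sketch, not part of the vendored code; it assumes the package is importable as github.com/hashicorp/terraform/lang/funcs):

	package main

	import (
		"fmt"

		"github.com/hashicorp/terraform/lang/funcs"
		"github.com/zclconf/go-cty/cty"
	)

	func main() {
		// Host number 5 within 10.12.112.0/20.
		host, _ := funcs.CidrHost(cty.StringVal("10.12.112.0/20"), cty.NumberIntVal(5))
		fmt.Println(host.AsString()) // 10.12.112.5

		// The dotted-decimal netmask for a /20 prefix.
		mask, _ := funcs.CidrNetmask(cty.StringVal("10.12.112.0/20"))
		fmt.Println(mask.AsString()) // 255.255.240.0

		// Extend the /8 by 8 new bits and select subnet number 2.
		sub, _ := funcs.CidrSubnet(cty.StringVal("10.0.0.0/8"), cty.NumberIntVal(8), cty.NumberIntVal(2))
		fmt.Println(sub.AsString()) // 10.2.0.0/16

		// Two consecutive /16s carved out of the same /8.
		subs, _ := funcs.CidrSubnets(cty.StringVal("10.0.0.0/8"), cty.NumberIntVal(8), cty.NumberIntVal(8))
		fmt.Println(subs.AsValueSlice()[1].AsString()) // 10.1.0.0/16
	}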
-func CidrSubnets(prefix cty.Value, newbits ...cty.Value) (cty.Value, error) { - args := make([]cty.Value, len(newbits)+1) - args[0] = prefix - copy(args[1:], newbits) - return CidrSubnetsFunc.Call(args) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go b/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go deleted file mode 100644 index bc93f8a2..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go +++ /dev/null @@ -1,629 +0,0 @@ -package funcs - -import ( - "errors" - "fmt" - "sort" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/function/stdlib" - "github.com/zclconf/go-cty/cty/gocty" -) - -var LengthFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "value", - Type: cty.DynamicPseudoType, - AllowDynamicType: true, - AllowUnknown: true, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - collTy := args[0].Type() - switch { - case collTy == cty.String || collTy.IsTupleType() || collTy.IsObjectType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType: - return cty.Number, nil - default: - return cty.Number, errors.New("argument must be a string, a collection type, or a structural type") - } - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - coll := args[0] - collTy := args[0].Type() - switch { - case collTy == cty.DynamicPseudoType: - return cty.UnknownVal(cty.Number), nil - case collTy.IsTupleType(): - l := len(collTy.TupleElementTypes()) - return cty.NumberIntVal(int64(l)), nil - case collTy.IsObjectType(): - l := len(collTy.AttributeTypes()) - return cty.NumberIntVal(int64(l)), nil - case collTy == cty.String: - // We'll delegate to the cty stdlib strlen function here, because - // it deals with all of the complexities of tokenizing unicode - // grapheme clusters. - return stdlib.Strlen(coll) - case collTy.IsListType() || collTy.IsSetType() || collTy.IsMapType(): - return coll.Length(), nil - default: - // Should never happen, because of the checks in our Type func above - return cty.UnknownVal(cty.Number), errors.New("impossible value type for length(...)") - } - }, -}) - -// CoalesceFunc constructs a function that takes any number of arguments and -// returns the first one that isn't empty. This function was copied from go-cty -// stdlib and modified so that it returns the first *non-empty* non-null element -// from a sequence, instead of merely the first non-null. 
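For instance, with the same imports as the cidr sketch above, the modified coalesce semantics can be seen directly; empty strings are skipped along with nulls (a usage sketch, not part of the vendored code):

	// coalesce() returns "b": the null and the empty string are both skipped.
	v, _ := funcs.Coalesce(cty.NullVal(cty.String), cty.StringVal(""), cty.StringVal("b"))
	fmt.Println(v.AsString()) // b

	// length() counts grapheme clusters for strings, elements otherwise.
	n, _ := funcs.Length(cty.StringVal("héllo"))
	fmt.Println(n.AsBigFloat()) // 5, even though the UTF-8 encoding is six bytes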
-var CoalesceFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - argTypes := make([]cty.Type, len(args)) - for i, val := range args { - argTypes[i] = val.Type() - } - retType, _ := convert.UnifyUnsafe(argTypes) - if retType == cty.NilType { - return cty.NilType, errors.New("all arguments must have the same type") - } - return retType, nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - for _, argVal := range args { - // We already know this will succeed because of the checks in our Type func above - argVal, _ = convert.Convert(argVal, retType) - if !argVal.IsKnown() { - return cty.UnknownVal(retType), nil - } - if argVal.IsNull() { - continue - } - if retType == cty.String && argVal.RawEquals(cty.StringVal("")) { - continue - } - - return argVal, nil - } - return cty.NilVal, errors.New("no non-null, non-empty-string arguments") - }, -}) - -// IndexFunc constructs a function that finds the element index for a given value in a list. -var IndexFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - { - Name: "value", - Type: cty.DynamicPseudoType, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) { - return cty.NilVal, errors.New("argument must be a list or tuple") - } - - if !args[0].IsKnown() { - return cty.UnknownVal(cty.Number), nil - } - - if args[0].LengthInt() == 0 { // Easy path - return cty.NilVal, errors.New("cannot search an empty list") - } - - for it := args[0].ElementIterator(); it.Next(); { - i, v := it.Element() - eq, err := stdlib.Equal(v, args[1]) - if err != nil { - return cty.NilVal, err - } - if !eq.IsKnown() { - return cty.UnknownVal(cty.Number), nil - } - if eq.True() { - return i, nil - } - } - return cty.NilVal, errors.New("item not found") - - }, -}) - -// Flatten until it's not a cty.List, and return whether the value is known. -// We can flatten lists with unknown values, as long as they are not -// lists themselves. -func flattener(flattenList cty.Value) ([]cty.Value, bool) { - out := make([]cty.Value, 0) - for it := flattenList.ElementIterator(); it.Next(); { - _, val := it.Element() - if val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType() { - if !val.IsKnown() { - return out, false - } - - res, known := flattener(val) - if !known { - return res, known - } - out = append(out, res...) - } else { - out = append(out, val) - } - } - return out, true -} - -// ListFunc constructs a function that takes an arbitrary number of arguments -// and returns a list containing those values in the same order. 
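IndexFunc above can be exercised the same way via its wrapper defined later in this file (same imports assumed); indices are zero-based and a missing item is an error rather than -1:

	idx, _ := funcs.Index(
		cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("c")}),
		cty.StringVal("b"),
	)
	fmt.Println(idx.AsBigFloat()) // 1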
-// -// This function is deprecated in Terraform v0.12 -var ListFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) == 0 { - return cty.NilType, errors.New("at least one argument is required") - } - - argTypes := make([]cty.Type, len(args)) - - for i, arg := range args { - argTypes[i] = arg.Type() - } - - retType, _ := convert.UnifyUnsafe(argTypes) - if retType == cty.NilType { - return cty.NilType, errors.New("all arguments must have the same type") - } - - return cty.List(retType), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - newList := make([]cty.Value, 0, len(args)) - - for _, arg := range args { - // We already know this will succeed because of the checks in our Type func above - arg, _ = convert.Convert(arg, retType.ElementType()) - newList = append(newList, arg) - } - - return cty.ListVal(newList), nil - }, -}) - -// LookupFunc constructs a function that performs dynamic lookups of map types. -var LookupFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "inputMap", - Type: cty.DynamicPseudoType, - }, - { - Name: "key", - Type: cty.String, - }, - }, - VarParam: &function.Parameter{ - Name: "default", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) < 1 || len(args) > 3 { - return cty.NilType, fmt.Errorf("lookup() takes two or three arguments, got %d", len(args)) - } - - ty := args[0].Type() - - switch { - case ty.IsObjectType(): - if !args[1].IsKnown() { - return cty.DynamicPseudoType, nil - } - - key := args[1].AsString() - if ty.HasAttribute(key) { - return args[0].GetAttr(key).Type(), nil - } else if len(args) == 3 { - // if the key isn't found but a default is provided, - // return the default type - return args[2].Type(), nil - } - return cty.DynamicPseudoType, function.NewArgErrorf(0, "the given object has no attribute %q", key) - case ty.IsMapType(): - if len(args) == 3 { - _, err = convert.Convert(args[2], ty.ElementType()) - if err != nil { - return cty.NilType, function.NewArgErrorf(2, "the default value must have the same type as the map elements") - } - } - return ty.ElementType(), nil - default: - return cty.NilType, function.NewArgErrorf(0, "lookup() requires a map as the first argument") - } - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var defaultVal cty.Value - defaultValueSet := false - - if len(args) == 3 { - defaultVal = args[2] - defaultValueSet = true - } - - mapVar := args[0] - lookupKey := args[1].AsString() - - if !mapVar.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - - if mapVar.Type().IsObjectType() { - if mapVar.Type().HasAttribute(lookupKey) { - return mapVar.GetAttr(lookupKey), nil - } - } else if mapVar.HasIndex(cty.StringVal(lookupKey)) == cty.True { - return mapVar.Index(cty.StringVal(lookupKey)), nil - } - - if defaultValueSet { - defaultVal, err = convert.Convert(defaultVal, retType) - if err != nil { - return cty.NilVal, err - } - return defaultVal, nil - } - - return cty.UnknownVal(cty.DynamicPseudoType), fmt.Errorf( - "lookup failed to find '%s'", lookupKey) - }, -}) - -// MapFunc constructs a function that takes an even number of arguments and -// returns 
a map whose elements are constructed from consecutive pairs of arguments. -// -// This function is deprecated in Terraform v0.12 -var MapFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) < 2 || len(args)%2 != 0 { - return cty.NilType, fmt.Errorf("map requires an even number of two or more arguments, got %d", len(args)) - } - - argTypes := make([]cty.Type, len(args)/2) - index := 0 - - for i := 0; i < len(args); i += 2 { - argTypes[index] = args[i+1].Type() - index++ - } - - valType, _ := convert.UnifyUnsafe(argTypes) - if valType == cty.NilType { - return cty.NilType, errors.New("all arguments must have the same type") - } - - return cty.Map(valType), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - for _, arg := range args { - if !arg.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - } - - outputMap := make(map[string]cty.Value) - - for i := 0; i < len(args); i += 2 { - - keyVal, err := convert.Convert(args[i], cty.String) - if err != nil { - return cty.NilVal, err - } - if keyVal.IsNull() { - return cty.NilVal, fmt.Errorf("argument %d is a null key", i+1) - } - key := keyVal.AsString() - - val := args[i+1] - - var variable cty.Value - err = gocty.FromCtyValue(val, &variable) - if err != nil { - return cty.NilVal, err - } - - // We already know this will succeed because of the checks in our Type func above - variable, _ = convert.Convert(variable, retType.ElementType()) - - // Check for duplicate keys - if _, ok := outputMap[key]; ok { - return cty.NilVal, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key) - } - outputMap[key] = variable - } - - return cty.MapVal(outputMap), nil - }, -}) - -// MatchkeysFunc constructs a function that constructs a new list by taking a -// subset of elements from one list whose indexes match the corresponding -// indexes of values in another list. -var MatchkeysFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "values", - Type: cty.List(cty.DynamicPseudoType), - }, - { - Name: "keys", - Type: cty.List(cty.DynamicPseudoType), - }, - { - Name: "searchset", - Type: cty.List(cty.DynamicPseudoType), - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) - if ty == cty.NilType { - return cty.NilType, errors.New("keys and searchset must be of the same type") - } - - // the return type is based on args[0] (values) - return args[0].Type(), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - if !args[0].IsKnown() { - return cty.UnknownVal(cty.List(retType.ElementType())), nil - } - - if args[0].LengthInt() != args[1].LengthInt() { - return cty.ListValEmpty(retType.ElementType()), errors.New("length of keys and values should be equal") - } - - output := make([]cty.Value, 0) - values := args[0] - - // Keys and searchset must be the same type. - // We can skip error checking here because we've already verified that - // they can be unified in the Type function - ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) - keys, _ := convert.Convert(args[1], ty) - searchset, _ := convert.Convert(args[2], ty) - - // if searchset is empty, return an empty list. 
- if searchset.LengthInt() == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - - if !values.IsWhollyKnown() || !keys.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - - i := 0 - for it := keys.ElementIterator(); it.Next(); { - _, key := it.Element() - for iter := searchset.ElementIterator(); iter.Next(); { - _, search := iter.Element() - eq, err := stdlib.Equal(key, search) - if err != nil { - return cty.NilVal, err - } - if !eq.IsKnown() { - return cty.ListValEmpty(retType.ElementType()), nil - } - if eq.True() { - v := values.Index(cty.NumberIntVal(int64(i))) - output = append(output, v) - break - } - } - i++ - } - - // if we haven't matched any key, then output is an empty list. - if len(output) == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - return cty.ListVal(output), nil - }, -}) - -// SumFunc constructs a function that returns the sum of all -// numbers provided in a list -var SumFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - - if !args[0].CanIterateElements() { - return cty.NilVal, function.NewArgErrorf(0, "cannot sum noniterable") - } - - if args[0].LengthInt() == 0 { // Easy path - return cty.NilVal, function.NewArgErrorf(0, "cannot sum an empty list") - } - - arg := args[0].AsValueSlice() - ty := args[0].Type() - - var i float64 - var s float64 - - if !ty.IsListType() && !ty.IsSetType() && !ty.IsTupleType() { - return cty.NilVal, function.NewArgErrorf(0, fmt.Sprintf("argument must be list, set, or tuple. Received %s", ty.FriendlyName())) - } - - if !args[0].IsKnown() { - return cty.UnknownVal(cty.Number), nil - } - - for _, v := range arg { - - if err := gocty.FromCtyValue(v, &i); err != nil { - return cty.UnknownVal(cty.Number), function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") - } else { - s += i - } - } - - return cty.NumberFloatVal(s), nil - }, -}) - -// TransposeFunc constructs a function that takes a map of lists of strings and -// swaps the keys and values to produce a new map of lists of strings. 
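Usage sketches for the two functions above, via the wrappers defined later in this file (imports as before; not part of the vendored code):

	// matchkeys() keeps values whose corresponding key appears in the search set.
	m, _ := funcs.Matchkeys(
		cty.ListVal([]cty.Value{cty.StringVal("i-123"), cty.StringVal("i-abc"), cty.StringVal("i-def")}),
		cty.ListVal([]cty.Value{cty.StringVal("us-west"), cty.StringVal("us-east"), cty.StringVal("us-east")}),
		cty.ListVal([]cty.Value{cty.StringVal("us-east")}),
	)
	fmt.Println(m.LengthInt()) // 2: the result is ["i-abc", "i-def"]

	// sum() accepts a list, set, or tuple of numbers.
	s, _ := funcs.Sum(cty.TupleVal([]cty.Value{cty.NumberIntVal(10), cty.NumberFloatVal(4.5)}))
	fmt.Println(s.AsBigFloat()) // 14.5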
-var TransposeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "values", - Type: cty.Map(cty.List(cty.String)), - }, - }, - Type: function.StaticReturnType(cty.Map(cty.List(cty.String))), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - inputMap := args[0] - if !inputMap.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - - outputMap := make(map[string]cty.Value) - tmpMap := make(map[string][]string) - - for it := inputMap.ElementIterator(); it.Next(); { - inKey, inVal := it.Element() - for iter := inVal.ElementIterator(); iter.Next(); { - _, val := iter.Element() - if !val.Type().Equals(cty.String) { - return cty.MapValEmpty(cty.List(cty.String)), errors.New("input must be a map of lists of strings") - } - - outKey := val.AsString() - if _, ok := tmpMap[outKey]; !ok { - tmpMap[outKey] = make([]string, 0) - } - outVal := tmpMap[outKey] - outVal = append(outVal, inKey.AsString()) - sort.Strings(outVal) - tmpMap[outKey] = outVal - } - } - - for outKey, outVal := range tmpMap { - values := make([]cty.Value, 0) - for _, v := range outVal { - values = append(values, cty.StringVal(v)) - } - outputMap[outKey] = cty.ListVal(values) - } - - if len(outputMap) == 0 { - return cty.MapValEmpty(cty.List(cty.String)), nil - } - - return cty.MapVal(outputMap), nil - }, -}) - -// helper function to add an element to a list, if it does not already exist -func appendIfMissing(slice []cty.Value, element cty.Value) ([]cty.Value, error) { - for _, ele := range slice { - eq, err := stdlib.Equal(ele, element) - if err != nil { - return slice, err - } - if eq.True() { - return slice, nil - } - } - return append(slice, element), nil -} - -// Length returns the number of elements in the given collection or number of -// Unicode characters in the given string. -func Length(collection cty.Value) (cty.Value, error) { - return LengthFunc.Call([]cty.Value{collection}) -} - -// Coalesce takes any number of arguments and returns the first one that isn't empty. -func Coalesce(args ...cty.Value) (cty.Value, error) { - return CoalesceFunc.Call(args) -} - -// Index finds the element index for a given value in a list. -func Index(list, value cty.Value) (cty.Value, error) { - return IndexFunc.Call([]cty.Value{list, value}) -} - -// List takes any number of list arguments and returns a list containing those -// values in the same order. -func List(args ...cty.Value) (cty.Value, error) { - return ListFunc.Call(args) -} - -// Lookup performs a dynamic lookup into a map. -// There are two required arguments, map and key, plus an optional default, -// which is a value to return if no key is found in map. -func Lookup(args ...cty.Value) (cty.Value, error) { - return LookupFunc.Call(args) -} - -// Map takes an even number of arguments and returns a map whose elements are constructed -// from consecutive pairs of arguments. -func Map(args ...cty.Value) (cty.Value, error) { - return MapFunc.Call(args) -} - -// Matchkeys constructs a new list by taking a subset of elements from one list -// whose indexes match the corresponding indexes of values in another list. -func Matchkeys(values, keys, searchset cty.Value) (cty.Value, error) { - return MatchkeysFunc.Call([]cty.Value{values, keys, searchset}) -} - -// Sum adds numbers in a list, set, or tuple -func Sum(list cty.Value) (cty.Value, error) { - return SumFunc.Call([]cty.Value{list}) -} - -// Transpose takes a map of lists of strings and swaps the keys and values to -// produce a new map of lists of strings. 
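And for the transpose operation just described (imports as before; a sketch):

	t, _ := funcs.Transpose(cty.MapVal(map[string]cty.Value{
		"a": cty.ListVal([]cty.Value{cty.StringVal("1"), cty.StringVal("2")}),
		"b": cty.ListVal([]cty.Value{cty.StringVal("2"), cty.StringVal("3")}),
	}))
	// t is {"1" = ["a"], "2" = ["a", "b"], "3" = ["b"]}: each string value
	// becomes a key listing the original keys it appeared under.
	fmt.Println(t.LengthInt()) // 3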
-func Transpose(values cty.Value) (cty.Value, error) { - return TransposeFunc.Call([]cty.Value{values}) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go b/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go deleted file mode 100644 index 83f85979..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go +++ /dev/null @@ -1,87 +0,0 @@ -package funcs - -import ( - "strconv" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/function" -) - -// MakeToFunc constructs a "to..." function, like "tostring", which converts -// its argument to a specific type or type kind. -// -// The given type wantTy can be any type constraint that cty's "convert" package -// would accept. In particular, this means that you can pass -// cty.List(cty.DynamicPseudoType) to mean "list of any single type", which -// will then cause cty to attempt to unify all of the element types when given -// a tuple. -func MakeToFunc(wantTy cty.Type) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "v", - // We use DynamicPseudoType rather than wantTy here so that - // all values will pass through the function API verbatim and - // we can handle the conversion logic within the Type and - // Impl functions. This allows us to customize the error - // messages to be more appropriate for an explicit type - // conversion, whereas the cty function system produces - // messages aimed at _implicit_ type conversions. - Type: cty.DynamicPseudoType, - AllowNull: true, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - gotTy := args[0].Type() - if gotTy.Equals(wantTy) { - return wantTy, nil - } - conv := convert.GetConversionUnsafe(args[0].Type(), wantTy) - if conv == nil { - // We'll use some specialized errors for some trickier cases, - // but most we can handle in a simple way. - switch { - case gotTy.IsTupleType() && wantTy.IsTupleType(): - return cty.NilType, function.NewArgErrorf(0, "incompatible tuple type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) - case gotTy.IsObjectType() && wantTy.IsObjectType(): - return cty.NilType, function.NewArgErrorf(0, "incompatible object type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) - default: - return cty.NilType, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) - } - } - // If a conversion is available then everything is fine. - return wantTy, nil - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - // We didn't set "AllowUnknown" on our argument, so it is guaranteed - // to be known here but may still be null. - ret, err := convert.Convert(args[0], retType) - if err != nil { - // Because we used GetConversionUnsafe above, conversion can - // still potentially fail in here. For example, if the user - // asks to convert the string "a" to bool then we'll - // optimistically permit it during type checking but fail here - // once we note that the value isn't either "true" or "false". 
-				gotTy := args[0].Type()
-				switch {
-				case gotTy == cty.String && wantTy == cty.Bool:
-					what := "string"
-					if !args[0].IsNull() {
-						what = strconv.Quote(args[0].AsString())
-					}
-					return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to bool; only the strings "true" or "false" are allowed`, what)
-				case gotTy == cty.String && wantTy == cty.Number:
-					what := "string"
-					if !args[0].IsNull() {
-						what = strconv.Quote(args[0].AsString())
-					}
-					return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to number; given string must be a decimal representation of a number`, what)
-				default:
-					return cty.NilVal, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint())
-				}
-			}
-			return ret, nil
-		},
-	})
-}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go b/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go
deleted file mode 100644
index 03b1572c..00000000
--- a/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go
+++ /dev/null
@@ -1,329 +0,0 @@
-package funcs
-
-import (
-	"crypto/md5"
-	"crypto/rsa"
-	"crypto/sha1"
-	"crypto/sha256"
-	"crypto/sha512"
-	"encoding/asn1"
-	"encoding/base64"
-	"encoding/hex"
-	"fmt"
-	"hash"
-	"strings"
-
-	uuidv5 "github.com/google/uuid"
-	uuid "github.com/hashicorp/go-uuid"
-	"github.com/zclconf/go-cty/cty"
-	"github.com/zclconf/go-cty/cty/function"
-	"github.com/zclconf/go-cty/cty/gocty"
-	"golang.org/x/crypto/bcrypt"
-	"golang.org/x/crypto/ssh"
-)
-
-var UUIDFunc = function.New(&function.Spec{
-	Params: []function.Parameter{},
-	Type:   function.StaticReturnType(cty.String),
-	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-		result, err := uuid.GenerateUUID()
-		if err != nil {
-			return cty.UnknownVal(cty.String), err
-		}
-		return cty.StringVal(result), nil
-	},
-})
-
-var UUIDV5Func = function.New(&function.Spec{
-	Params: []function.Parameter{
-		{
-			Name: "namespace",
-			Type: cty.String,
-		},
-		{
-			Name: "name",
-			Type: cty.String,
-		},
-	},
-	Type: function.StaticReturnType(cty.String),
-	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-		var namespace uuidv5.UUID
-		switch {
-		case args[0].AsString() == "dns":
-			namespace = uuidv5.NameSpaceDNS
-		case args[0].AsString() == "url":
-			namespace = uuidv5.NameSpaceURL
-		case args[0].AsString() == "oid":
-			namespace = uuidv5.NameSpaceOID
-		case args[0].AsString() == "x500":
-			namespace = uuidv5.NameSpaceX500
-		default:
-			if namespace, err = uuidv5.Parse(args[0].AsString()); err != nil {
-				return cty.UnknownVal(cty.String), fmt.Errorf("uuidv5() doesn't support namespace %s (%v)", args[0].AsString(), err)
-			}
-		}
-		val := args[1].AsString()
-		return cty.StringVal(uuidv5.NewSHA1(namespace, []byte(val)).String()), nil
-	},
-})
-
-// Base64Sha256Func constructs a function that computes the SHA256 hash of a given string
-// and encodes it with Base64.
-var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString)
-
-// MakeFileBase64Sha256Func constructs a function that is like Base64Sha256Func but reads the
-// contents of a file rather than hashing a given literal string.
-func MakeFileBase64Sha256Func(baseDir string) function.Function {
-	return makeFileHashFunction(baseDir, sha256.New, base64.StdEncoding.EncodeToString)
-}
-
-// Base64Sha512Func constructs a function that computes the SHA512 hash of a given string
-// and encodes it with Base64.
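One property worth showing for the two UUID flavors above, via the wrappers defined later in this file (imports as before; a sketch): uuid() is impure and random, while uuidv5() is a pure function of its namespace and name.

	id1, _ := funcs.UUIDV5(cty.StringVal("dns"), cty.StringVal("example.com"))
	id2, _ := funcs.UUIDV5(cty.StringVal("dns"), cty.StringVal("example.com"))
	fmt.Println(id1.RawEquals(id2)) // true: version-5 UUIDs are deterministic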
-var Base64Sha512Func = makeStringHashFunction(sha512.New, base64.StdEncoding.EncodeToString)
-
-// MakeFileBase64Sha512Func constructs a function that is like Base64Sha512Func but reads the
-// contents of a file rather than hashing a given literal string.
-func MakeFileBase64Sha512Func(baseDir string) function.Function {
-	return makeFileHashFunction(baseDir, sha512.New, base64.StdEncoding.EncodeToString)
-}
-
-// BcryptFunc constructs a function that computes a hash of the given string using the Blowfish cipher.
-var BcryptFunc = function.New(&function.Spec{
-	Params: []function.Parameter{
-		{
-			Name: "str",
-			Type: cty.String,
-		},
-	},
-	VarParam: &function.Parameter{
-		Name: "cost",
-		Type: cty.Number,
-	},
-	Type: function.StaticReturnType(cty.String),
-	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-		defaultCost := 10
-
-		if len(args) > 1 {
-			var val int
-			if err := gocty.FromCtyValue(args[1], &val); err != nil {
-				return cty.UnknownVal(cty.String), err
-			}
-			defaultCost = val
-		}
-
-		if len(args) > 2 {
-			return cty.UnknownVal(cty.String), fmt.Errorf("bcrypt() takes no more than two arguments")
-		}
-
-		input := args[0].AsString()
-		out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost)
-		if err != nil {
-			return cty.UnknownVal(cty.String), fmt.Errorf("error occurred generating password %s", err.Error())
-		}
-
-		return cty.StringVal(string(out)), nil
-	},
-})
-
-// Md5Func constructs a function that computes the MD5 hash of a given string and encodes it with hexadecimal digits.
-var Md5Func = makeStringHashFunction(md5.New, hex.EncodeToString)
-
-// MakeFileMd5Func constructs a function that is like Md5Func but reads the
-// contents of a file rather than hashing a given literal string.
-func MakeFileMd5Func(baseDir string) function.Function {
-	return makeFileHashFunction(baseDir, md5.New, hex.EncodeToString)
-}
-
-// RsaDecryptFunc constructs a function that decrypts an RSA-encrypted ciphertext.
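Usage sketches for the hashing helpers above, via the wrappers defined later in this file (imports as before, plus golang.org/x/crypto/bcrypt for verification; not part of the vendored code):

	// md5() of a known input yields the well-known digest.
	d, _ := funcs.Md5(cty.StringVal("hello"))
	fmt.Println(d.AsString()) // 5d41402abc4b2a76b9719d911017c592

	// bcrypt() salts internally, so two calls almost never produce the same
	// string; verify with bcrypt.CompareHashAndPassword rather than comparing.
	h, _ := funcs.Bcrypt(cty.StringVal("secret"))
	err := bcrypt.CompareHashAndPassword([]byte(h.AsString()), []byte("secret"))
	fmt.Println(err == nil) // true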
-var RsaDecryptFunc = function.New(&function.Spec{
-	Params: []function.Parameter{
-		{
-			Name: "ciphertext",
-			Type: cty.String,
-		},
-		{
-			Name: "privatekey",
-			Type: cty.String,
-		},
-	},
-	Type: function.StaticReturnType(cty.String),
-	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-		s := args[0].AsString()
-		key := args[1].AsString()
-
-		b, err := base64.StdEncoding.DecodeString(s)
-		if err != nil {
-			return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "failed to decode input %q: cipher text must be base64-encoded", s)
-		}
-
-		rawKey, err := ssh.ParseRawPrivateKey([]byte(key))
-		if err != nil {
-			var errStr string
-			switch e := err.(type) {
-			case asn1.SyntaxError:
-				errStr = strings.ReplaceAll(e.Error(), "asn1: syntax error", "invalid ASN1 data in the given private key")
-			case asn1.StructuralError:
-				errStr = strings.ReplaceAll(e.Error(), "asn1: structure error", "invalid ASN1 data in the given private key")
-			default:
-				errStr = fmt.Sprintf("invalid private key: %s", e)
-			}
-			return cty.UnknownVal(cty.String), function.NewArgErrorf(1, errStr)
-		}
-		privateKey, ok := rawKey.(*rsa.PrivateKey)
-		if !ok {
-			return cty.UnknownVal(cty.String), function.NewArgErrorf(1, "invalid private key type %T", rawKey)
-		}
-
-		out, err := rsa.DecryptPKCS1v15(nil, privateKey, b)
-		if err != nil {
-			return cty.UnknownVal(cty.String), fmt.Errorf("failed to decrypt: %s", err)
-		}
-
-		return cty.StringVal(string(out)), nil
-	},
-})
-
-// Sha1Func constructs a function that computes the SHA1 hash of a given string
-// and encodes it with hexadecimal digits.
-var Sha1Func = makeStringHashFunction(sha1.New, hex.EncodeToString)
-
-// MakeFileSha1Func constructs a function that is like Sha1Func but reads the
-// contents of a file rather than hashing a given literal string.
-func MakeFileSha1Func(baseDir string) function.Function {
-	return makeFileHashFunction(baseDir, sha1.New, hex.EncodeToString)
-}
-
-// Sha256Func constructs a function that computes the SHA256 hash of a given string
-// and encodes it with hexadecimal digits.
-var Sha256Func = makeStringHashFunction(sha256.New, hex.EncodeToString)
-
-// MakeFileSha256Func constructs a function that is like Sha256Func but reads the
-// contents of a file rather than hashing a given literal string.
-func MakeFileSha256Func(baseDir string) function.Function {
-	return makeFileHashFunction(baseDir, sha256.New, hex.EncodeToString)
-}
-
-// Sha512Func constructs a function that computes the SHA512 hash of a given string
-// and encodes it with hexadecimal digits.
-var Sha512Func = makeStringHashFunction(sha512.New, hex.EncodeToString)
-
-// MakeFileSha512Func constructs a function that is like Sha512Func but reads the
-// contents of a file rather than hashing a given literal string.
-// MakeFileSha512Func constructs a function that is like Sha512Func but reads the
-// contents of a file rather than hashing a given literal string.
-func MakeFileSha512Func(baseDir string) function.Function {
-	return makeFileHashFunction(baseDir, sha512.New, hex.EncodeToString)
-}
-
-func makeStringHashFunction(hf func() hash.Hash, enc func([]byte) string) function.Function {
-	return function.New(&function.Spec{
-		Params: []function.Parameter{
-			{
-				Name: "str",
-				Type: cty.String,
-			},
-		},
-		Type: function.StaticReturnType(cty.String),
-		Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-			s := args[0].AsString()
-			h := hf()
-			h.Write([]byte(s))
-			rv := enc(h.Sum(nil))
-			return cty.StringVal(rv), nil
-		},
-	})
-}
-
-func makeFileHashFunction(baseDir string, hf func() hash.Hash, enc func([]byte) string) function.Function {
-	return function.New(&function.Spec{
-		Params: []function.Parameter{
-			{
-				Name: "path",
-				Type: cty.String,
-			},
-		},
-		Type: function.StaticReturnType(cty.String),
-		Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-			path := args[0].AsString()
-			src, err := readFileBytes(baseDir, path)
-			if err != nil {
-				return cty.UnknownVal(cty.String), err
-			}
-
-			h := hf()
-			h.Write(src)
-			rv := enc(h.Sum(nil))
-			return cty.StringVal(rv), nil
-		},
-	})
-}
-
-// UUID generates and returns a Type-4 UUID in the standard hexadecimal string
-// format.
-//
-// This is not a pure function: it will generate a different result for each
-// call. It must therefore be registered as an impure function in the function
-// table in the "lang" package.
-func UUID() (cty.Value, error) {
-	return UUIDFunc.Call(nil)
-}
-
-// UUIDV5 generates and returns a Type-5 UUID in the standard hexadecimal string
-// format.
-func UUIDV5(namespace cty.Value, name cty.Value) (cty.Value, error) {
-	return UUIDV5Func.Call([]cty.Value{namespace, name})
-}
-
-// Base64Sha256 computes the SHA256 hash of a given string and encodes it with
-// Base64.
-//
-// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied
-// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
-// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
-func Base64Sha256(str cty.Value) (cty.Value, error) {
-	return Base64Sha256Func.Call([]cty.Value{str})
-}
-
-// Base64Sha512 computes the SHA512 hash of a given string and encodes it with
-// Base64.
-//
-// The given string is first encoded as UTF-8 and then the SHA512 algorithm is applied
-// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
-// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
-func Base64Sha512(str cty.Value) (cty.Value, error) {
-	return Base64Sha512Func.Call([]cty.Value{str})
-}
-
-// Bcrypt computes a hash of the given string using the Blowfish cipher,
-// returning a string in the Modular Crypt Format
-// usually expected in the shadow password file on many Unix systems.
-func Bcrypt(str cty.Value, cost ...cty.Value) (cty.Value, error) {
-	args := make([]cty.Value, len(cost)+1)
-	args[0] = str
-	copy(args[1:], cost)
-	return BcryptFunc.Call(args)
-}
-
-// Md5 computes the MD5 hash of a given string and encodes it with hexadecimal digits.
-func Md5(str cty.Value) (cty.Value, error) {
-	return Md5Func.Call([]cty.Value{str})
-}
-
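Editorial aside: makeStringHashFunction and makeFileHashFunction above pair a hash constructor with an encoder; a minimal standalone version of that pattern (standard library only):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
)

// makeHasher composes a hash constructor and an encoder, as the
// factories above do.
func makeHasher(hf func() hash.Hash, enc func([]byte) string) func(string) string {
	return func(s string) string {
		h := hf()
		h.Write([]byte(s))
		return enc(h.Sum(nil))
	}
}

func main() {
	sha256hex := makeHasher(sha256.New, hex.EncodeToString)
	fmt.Println(sha256hex("test")) // 9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08
}
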
-// RsaDecrypt decrypts an RSA-encrypted ciphertext, returning the corresponding
-// cleartext.
-func RsaDecrypt(ciphertext, privatekey cty.Value) (cty.Value, error) {
-	return RsaDecryptFunc.Call([]cty.Value{ciphertext, privatekey})
-}
-
-// Sha1 computes the SHA1 hash of a given string and encodes it with hexadecimal digits.
-func Sha1(str cty.Value) (cty.Value, error) {
-	return Sha1Func.Call([]cty.Value{str})
-}
-
-// Sha256 computes the SHA256 hash of a given string and encodes it with hexadecimal digits.
-func Sha256(str cty.Value) (cty.Value, error) {
-	return Sha256Func.Call([]cty.Value{str})
-}
-
-// Sha512 computes the SHA512 hash of a given string and encodes it with hexadecimal digits.
-func Sha512(str cty.Value) (cty.Value, error) {
-	return Sha512Func.Call([]cty.Value{str})
-}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go b/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go
deleted file mode 100644
index 5dae1987..00000000
--- a/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package funcs
-
-import (
-	"time"
-
-	"github.com/zclconf/go-cty/cty"
-	"github.com/zclconf/go-cty/cty/function"
-)
-
-// TimestampFunc constructs a function that returns a string representation of the current date and time.
-var TimestampFunc = function.New(&function.Spec{
-	Params: []function.Parameter{},
-	Type:   function.StaticReturnType(cty.String),
-	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-		return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
-	},
-})
-
-// TimeAddFunc constructs a function that adds a duration to a timestamp, returning a new timestamp.
-var TimeAddFunc = function.New(&function.Spec{
-	Params: []function.Parameter{
-		{
-			Name: "timestamp",
-			Type: cty.String,
-		},
-		{
-			Name: "duration",
-			Type: cty.String,
-		},
-	},
-	Type: function.StaticReturnType(cty.String),
-	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-		ts, err := time.Parse(time.RFC3339, args[0].AsString())
-		if err != nil {
-			return cty.UnknownVal(cty.String), err
-		}
-		duration, err := time.ParseDuration(args[1].AsString())
-		if err != nil {
-			return cty.UnknownVal(cty.String), err
-		}
-
-		return cty.StringVal(ts.Add(duration).Format(time.RFC3339)), nil
-	},
-})
-
-// Timestamp returns a string representation of the current date and time.
-//
-// In the Terraform language, timestamps are conventionally represented as
-// strings using RFC 3339 "Date and Time format" syntax, and so timestamp
-// returns a string in this format.
-func Timestamp() (cty.Value, error) {
-	return TimestampFunc.Call([]cty.Value{})
-}
-
-// TimeAdd adds a duration to a timestamp, returning a new timestamp.
-//
-// In the Terraform language, timestamps are conventionally represented as
-// strings using RFC 3339 "Date and Time format" syntax. Timeadd requires
-// the timestamp argument to be a string conforming to this syntax.
-//
-// `duration` is a string representation of a time difference, consisting of
-// sequences of number and unit pairs, like `"1.5h"` or `"1h30m"`. The accepted
-// units are `"ns"`, `"us"` (or `"µs"`), `"ms"`, `"s"`, `"m"`, and `"h"`. The first
-// number may be negative to indicate a negative duration, like `"-2h5m"`.
-//
-// The result is a string, also in RFC 3339 format, representing the result
-// of adding the given duration to the given timestamp.
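Editorial aside: a standalone sketch of the parse/add/format sequence that TimeAddFunc's Impl performs (standard library only):

package main

import (
	"fmt"
	"time"
)

func main() {
	ts, _ := time.Parse(time.RFC3339, "2017-11-22T00:00:00Z")
	d, _ := time.ParseDuration("10m")
	fmt.Println(ts.Add(d).Format(time.RFC3339)) // 2017-11-22T00:10:00Z
}
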
-func TimeAdd(timestamp cty.Value, duration cty.Value) (cty.Value, error) { - return TimeAddFunc.Call([]cty.Value{timestamp, duration}) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go b/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go deleted file mode 100644 index d9a0bbb3..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go +++ /dev/null @@ -1,140 +0,0 @@ -package funcs - -import ( - "bytes" - "compress/gzip" - "encoding/base64" - "fmt" - "log" - "net/url" - "unicode/utf8" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" -) - -// Base64DecodeFunc constructs a function that decodes a string containing a base64 sequence. -var Base64DecodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - s := args[0].AsString() - sDec, err := base64.StdEncoding.DecodeString(s) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data '%s'", s) - } - if !utf8.Valid([]byte(sDec)) { - log.Printf("[DEBUG] the result of decoding the provided string is not valid UTF-8: %s", sDec) - return cty.UnknownVal(cty.String), fmt.Errorf("the result of decoding the provided string is not valid UTF-8") - } - return cty.StringVal(string(sDec)), nil - }, -}) - -// Base64EncodeFunc constructs a function that encodes a string to a base64 sequence. -var Base64EncodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return cty.StringVal(base64.StdEncoding.EncodeToString([]byte(args[0].AsString()))), nil - }, -}) - -// Base64GzipFunc constructs a function that compresses a string with gzip and then encodes the result in -// Base64 encoding. -var Base64GzipFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - s := args[0].AsString() - - var b bytes.Buffer - gz := gzip.NewWriter(&b) - if _, err := gz.Write([]byte(s)); err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to write gzip raw data: '%s'", s) - } - if err := gz.Flush(); err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to flush gzip writer: '%s'", s) - } - if err := gz.Close(); err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to close gzip writer: '%s'", s) - } - return cty.StringVal(base64.StdEncoding.EncodeToString(b.Bytes())), nil - }, -}) - -// URLEncodeFunc constructs a function that applies URL encoding to a given string. -var URLEncodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return cty.StringVal(url.QueryEscape(args[0].AsString())), nil - }, -}) - -// Base64Decode decodes a string containing a base64 sequence. -// -// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. 
-// -// Strings in the Terraform language are sequences of unicode characters rather -// than bytes, so this function will also interpret the resulting bytes as -// UTF-8. If the bytes after Base64 decoding are _not_ valid UTF-8, this function -// produces an error. -func Base64Decode(str cty.Value) (cty.Value, error) { - return Base64DecodeFunc.Call([]cty.Value{str}) -} - -// Base64Encode applies Base64 encoding to a string. -// -// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. -// -// Strings in the Terraform language are sequences of unicode characters rather -// than bytes, so this function will first encode the characters from the string -// as UTF-8, and then apply Base64 encoding to the result. -func Base64Encode(str cty.Value) (cty.Value, error) { - return Base64EncodeFunc.Call([]cty.Value{str}) -} - -// Base64Gzip compresses a string with gzip and then encodes the result in -// Base64 encoding. -// -// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. -// -// Strings in the Terraform language are sequences of unicode characters rather -// than bytes, so this function will first encode the characters from the string -// as UTF-8, then apply gzip compression, and then finally apply Base64 encoding. -func Base64Gzip(str cty.Value) (cty.Value, error) { - return Base64GzipFunc.Call([]cty.Value{str}) -} - -// URLEncode applies URL encoding to a given string. -// -// This function identifies characters in the given string that would have a -// special meaning when included as a query string argument in a URL and -// escapes them using RFC 3986 "percent encoding". -// -// If the given string contains non-ASCII characters, these are first encoded as -// UTF-8 and then percent encoding is applied separately to each UTF-8 byte. -func URLEncode(str cty.Value) (cty.Value, error) { - return URLEncodeFunc.Call([]cty.Value{str}) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go b/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go deleted file mode 100644 index eb4921de..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go +++ /dev/null @@ -1,453 +0,0 @@ -package funcs - -import ( - "encoding/base64" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "unicode/utf8" - - "github.com/bmatcuk/doublestar" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - homedir "github.com/mitchellh/go-homedir" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" -) - -// MakeFileFunc constructs a function that takes a file path and returns the -// contents of that file, either directly as a string (where valid UTF-8 is -// required) or as a string containing base64 bytes. -func MakeFileFunc(baseDir string, encBase64 bool) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - path := args[0].AsString() - src, err := readFileBytes(baseDir, path) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - switch { - case encBase64: - enc := base64.StdEncoding.EncodeToString(src) - return cty.StringVal(enc), nil - default: - if !utf8.Valid(src) { - return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. 
filemd5, filesha256) to obtain file hashing results instead", path) - } - return cty.StringVal(string(src)), nil - } - }, - }) -} - -// MakeTemplateFileFunc constructs a function that takes a file path and -// an arbitrary object of named values and attempts to render the referenced -// file as a template using HCL template syntax. -// -// The template itself may recursively call other functions so a callback -// must be provided to get access to those functions. The template cannot, -// however, access any variables defined in the scope: it is restricted only to -// those variables provided in the second function argument, to ensure that all -// dependencies on other graph nodes can be seen before executing this function. -// -// As a special exception, a referenced template file may not recursively call -// the templatefile function, since that would risk the same file being -// included into itself indefinitely. -func MakeTemplateFileFunc(baseDir string, funcsCb func() map[string]function.Function) function.Function { - - params := []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - { - Name: "vars", - Type: cty.DynamicPseudoType, - }, - } - - loadTmpl := func(fn string) (hcl.Expression, error) { - // We re-use File here to ensure the same filename interpretation - // as it does, along with its other safety checks. - tmplVal, err := File(baseDir, cty.StringVal(fn)) - if err != nil { - return nil, err - } - - expr, diags := hclsyntax.ParseTemplate([]byte(tmplVal.AsString()), fn, hcl.Pos{Line: 1, Column: 1}) - if diags.HasErrors() { - return nil, diags - } - - return expr, nil - } - - renderTmpl := func(expr hcl.Expression, varsVal cty.Value) (cty.Value, error) { - if varsTy := varsVal.Type(); !(varsTy.IsMapType() || varsTy.IsObjectType()) { - return cty.DynamicVal, function.NewArgErrorf(1, "invalid vars value: must be a map") // or an object, but we don't strongly distinguish these most of the time - } - - ctx := &hcl.EvalContext{ - Variables: varsVal.AsValueMap(), - } - - // We require all of the variables to be valid HCL identifiers, because - // otherwise there would be no way to refer to them in the template - // anyway. Rejecting this here gives better feedback to the user - // than a syntax error somewhere in the template itself. - for n := range ctx.Variables { - if !hclsyntax.ValidIdentifier(n) { - // This error message intentionally doesn't describe _all_ of - // the different permutations that are technically valid as an - // HCL identifier, but rather focuses on what we might - // consider to be an "idiomatic" variable name. - return cty.DynamicVal, function.NewArgErrorf(1, "invalid template variable name %q: must start with a letter, followed by zero or more letters, digits, and underscores", n) - } - } - - // We'll pre-check references in the template here so we can give a - // more specialized error message than HCL would by default, so it's - // clearer that this problem is coming from a templatefile call. - for _, traversal := range expr.Variables() { - root := traversal.RootName() - if _, ok := ctx.Variables[root]; !ok { - return cty.DynamicVal, function.NewArgErrorf(1, "vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange()) - } - } - - givenFuncs := funcsCb() // this callback indirection is to avoid chicken/egg problems - funcs := make(map[string]function.Function, len(givenFuncs)) - for name, fn := range givenFuncs { - if name == "templatefile" { - // We stub this one out to prevent recursive calls. 
-			funcs[name] = function.New(&function.Spec{
-				Params: params,
-				Type: func(args []cty.Value) (cty.Type, error) {
-					return cty.NilType, fmt.Errorf("cannot recursively call templatefile from inside templatefile call")
-				},
-			})
-			continue
-		}
-		funcs[name] = fn
-	}
-	ctx.Functions = funcs
-
-	val, diags := expr.Value(ctx)
-	if diags.HasErrors() {
-		return cty.DynamicVal, diags
-	}
-	return val, nil
-}
-
-	return function.New(&function.Spec{
-		Params: params,
-		Type: func(args []cty.Value) (cty.Type, error) {
-			if !(args[0].IsKnown() && args[1].IsKnown()) {
-				return cty.DynamicPseudoType, nil
-			}
-
-			// We'll render our template now to see what result type it produces.
-			// A template consisting only of a single interpolation can potentially
-			// return any type.
-			expr, err := loadTmpl(args[0].AsString())
-			if err != nil {
-				return cty.DynamicPseudoType, err
-			}
-
-			// This is safe even if args[1] contains unknowns because the HCL
-			// template renderer itself knows how to short-circuit those.
-			val, err := renderTmpl(expr, args[1])
-			return val.Type(), err
-		},
-		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-			expr, err := loadTmpl(args[0].AsString())
-			if err != nil {
-				return cty.DynamicVal, err
-			}
-			return renderTmpl(expr, args[1])
-		},
-	})
-
-}
-
-// MakeFileExistsFunc constructs a function that takes a path
-// and determines whether a file exists at that path.
-func MakeFileExistsFunc(baseDir string) function.Function {
-	return function.New(&function.Spec{
-		Params: []function.Parameter{
-			{
-				Name: "path",
-				Type: cty.String,
-			},
-		},
-		Type: function.StaticReturnType(cty.Bool),
-		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-			path := args[0].AsString()
-			path, err := homedir.Expand(path)
-			if err != nil {
-				return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %s", err)
-			}
-
-			if !filepath.IsAbs(path) {
-				path = filepath.Join(baseDir, path)
-			}
-
-			// Ensure that the path is canonical for the host OS
-			path = filepath.Clean(path)
-
-			fi, err := os.Stat(path)
-			if err != nil {
-				if os.IsNotExist(err) {
-					return cty.False, nil
-				}
-				return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", path)
-			}
-
-			if fi.Mode().IsRegular() {
-				return cty.True, nil
-			}
-
-			return cty.False, fmt.Errorf("%s is not a regular file, but %q",
-				path, fi.Mode().String())
-		},
-	})
-}
-
-// MakeFileSetFunc constructs a function that takes a glob pattern
-// and enumerates a file set from that pattern.
-func MakeFileSetFunc(baseDir string) function.Function {
-	return function.New(&function.Spec{
-		Params: []function.Parameter{
-			{
-				Name: "path",
-				Type: cty.String,
-			},
-			{
-				Name: "pattern",
-				Type: cty.String,
-			},
-		},
-		Type: function.StaticReturnType(cty.Set(cty.String)),
-		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-			path := args[0].AsString()
-			pattern := args[1].AsString()
-
-			if !filepath.IsAbs(path) {
-				path = filepath.Join(baseDir, path)
-			}
-
-			// Join the path to the glob pattern, while ensuring the full
-			// pattern is canonical for the host OS. The joined path is
-			// automatically cleaned during this operation.
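			// (Editorial note, not part of the original source: the vendored
			// bmatcuk/doublestar package differs from path/filepath.Glob in
			// that `**` matches across directory separators, e.g.
			//
			//	matches, err := doublestar.Glob("/base/**/*.tf")
			//	// may yield {"/base/main.tf", "/base/modules/vpc/main.tf"}
			//
			// which is what lets fileset patterns recurse into subdirectories.)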
- pattern = filepath.Join(path, pattern) - - matches, err := doublestar.Glob(pattern) - if err != nil { - return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to glob pattern (%s): %s", pattern, err) - } - - var matchVals []cty.Value - for _, match := range matches { - fi, err := os.Stat(match) - - if err != nil { - return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to stat (%s): %s", match, err) - } - - if !fi.Mode().IsRegular() { - continue - } - - // Remove the path and file separator from matches. - match, err = filepath.Rel(path, match) - - if err != nil { - return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to trim path of match (%s): %s", match, err) - } - - // Replace any remaining file separators with forward slash (/) - // separators for cross-system compatibility. - match = filepath.ToSlash(match) - - matchVals = append(matchVals, cty.StringVal(match)) - } - - if len(matchVals) == 0 { - return cty.SetValEmpty(cty.String), nil - } - - return cty.SetVal(matchVals), nil - }, - }) -} - -// BasenameFunc constructs a function that takes a string containing a filesystem path -// and removes all except the last portion from it. -var BasenameFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return cty.StringVal(filepath.Base(args[0].AsString())), nil - }, -}) - -// DirnameFunc constructs a function that takes a string containing a filesystem path -// and removes the last portion from it. -var DirnameFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return cty.StringVal(filepath.Dir(args[0].AsString())), nil - }, -}) - -// AbsPathFunc constructs a function that converts a filesystem path to an absolute path -var AbsPathFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - absPath, err := filepath.Abs(args[0].AsString()) - return cty.StringVal(filepath.ToSlash(absPath)), err - }, -}) - -// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory. -var PathExpandFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - - homePath, err := homedir.Expand(args[0].AsString()) - return cty.StringVal(homePath), err - }, -}) - -func readFileBytes(baseDir, path string) ([]byte, error) { - path, err := homedir.Expand(path) - if err != nil { - return nil, fmt.Errorf("failed to expand ~: %s", err) - } - - if !filepath.IsAbs(path) { - path = filepath.Join(baseDir, path) - } - - // Ensure that the path is canonical for the host OS - path = filepath.Clean(path) - - src, err := ioutil.ReadFile(path) - if err != nil { - // ReadFile does not return Terraform-user-friendly error - // messages, so we'll provide our own. 
- if os.IsNotExist(err) { - return nil, fmt.Errorf("no file exists at %s", path) - } - return nil, fmt.Errorf("failed to read %s", path) - } - - return src, nil -} - -// File reads the contents of the file at the given path. -// -// The file must contain valid UTF-8 bytes, or this function will return an error. -// -// The underlying function implementation works relative to a particular base -// directory, so this wrapper takes a base directory string and uses it to -// construct the underlying function before calling it. -func File(baseDir string, path cty.Value) (cty.Value, error) { - fn := MakeFileFunc(baseDir, false) - return fn.Call([]cty.Value{path}) -} - -// FileExists determines whether a file exists at the given path. -// -// The underlying function implementation works relative to a particular base -// directory, so this wrapper takes a base directory string and uses it to -// construct the underlying function before calling it. -func FileExists(baseDir string, path cty.Value) (cty.Value, error) { - fn := MakeFileExistsFunc(baseDir) - return fn.Call([]cty.Value{path}) -} - -// FileSet enumerates a set of files given a glob pattern -// -// The underlying function implementation works relative to a particular base -// directory, so this wrapper takes a base directory string and uses it to -// construct the underlying function before calling it. -func FileSet(baseDir string, path, pattern cty.Value) (cty.Value, error) { - fn := MakeFileSetFunc(baseDir) - return fn.Call([]cty.Value{path, pattern}) -} - -// FileBase64 reads the contents of the file at the given path. -// -// The bytes from the file are encoded as base64 before returning. -// -// The underlying function implementation works relative to a particular base -// directory, so this wrapper takes a base directory string and uses it to -// construct the underlying function before calling it. -func FileBase64(baseDir string, path cty.Value) (cty.Value, error) { - fn := MakeFileFunc(baseDir, true) - return fn.Call([]cty.Value{path}) -} - -// Basename takes a string containing a filesystem path and removes all except the last portion from it. -// -// The underlying function implementation works only with the path string and does not access the filesystem itself. -// It is therefore unable to take into account filesystem features such as symlinks. -// -// If the path is empty then the result is ".", representing the current working directory. -func Basename(path cty.Value) (cty.Value, error) { - return BasenameFunc.Call([]cty.Value{path}) -} - -// Dirname takes a string containing a filesystem path and removes the last portion from it. -// -// The underlying function implementation works only with the path string and does not access the filesystem itself. -// It is therefore unable to take into account filesystem features such as symlinks. -// -// If the path is empty then the result is ".", representing the current working directory. -func Dirname(path cty.Value) (cty.Value, error) { - return DirnameFunc.Call([]cty.Value{path}) -} - -// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with -// the current user's home directory path. -// -// The underlying function implementation works only with the path string and does not access the filesystem itself. -// It is therefore unable to take into account filesystem features such as symlinks. -// -// If the leading segment in the path is not `~` then the given path is returned unmodified. 
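Editorial aside: the `~` expansion used by pathexpand and readFileBytes comes from the vendored mitchellh/go-homedir package; a minimal sketch (the printed path is illustrative):

package main

import (
	"fmt"

	homedir "github.com/mitchellh/go-homedir"
)

func main() {
	p, err := homedir.Expand("~/.terraformrc")
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // e.g. /home/user/.terraformrc
}
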
-func Pathexpand(path cty.Value) (cty.Value, error) {
-	return PathExpandFunc.Call([]cty.Value{path})
-}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/number.go b/vendor/github.com/hashicorp/terraform/lang/funcs/number.go
deleted file mode 100644
index 43effec1..00000000
--- a/vendor/github.com/hashicorp/terraform/lang/funcs/number.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package funcs
-
-import (
-	"math"
-	"math/big"
-
-	"github.com/zclconf/go-cty/cty"
-	"github.com/zclconf/go-cty/cty/function"
-	"github.com/zclconf/go-cty/cty/gocty"
-)
-
-// LogFunc constructs a function that returns the logarithm of a given number in a given base.
-var LogFunc = function.New(&function.Spec{
-	Params: []function.Parameter{
-		{
-			Name: "num",
-			Type: cty.Number,
-		},
-		{
-			Name: "base",
-			Type: cty.Number,
-		},
-	},
-	Type: function.StaticReturnType(cty.Number),
-	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-		var num float64
-		if err := gocty.FromCtyValue(args[0], &num); err != nil {
-			return cty.UnknownVal(cty.String), err
-		}
-
-		var base float64
-		if err := gocty.FromCtyValue(args[1], &base); err != nil {
-			return cty.UnknownVal(cty.String), err
-		}
-
-		return cty.NumberFloatVal(math.Log(num) / math.Log(base)), nil
-	},
-})
-
-// PowFunc constructs a function that returns the given number raised to the given power.
-var PowFunc = function.New(&function.Spec{
-	Params: []function.Parameter{
-		{
-			Name: "num",
-			Type: cty.Number,
-		},
-		{
-			Name: "power",
-			Type: cty.Number,
-		},
-	},
-	Type: function.StaticReturnType(cty.Number),
-	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-		var num float64
-		if err := gocty.FromCtyValue(args[0], &num); err != nil {
-			return cty.UnknownVal(cty.String), err
-		}
-
-		var power float64
-		if err := gocty.FromCtyValue(args[1], &power); err != nil {
-			return cty.UnknownVal(cty.String), err
-		}
-
-		return cty.NumberFloatVal(math.Pow(num, power)), nil
-	},
-})
-
-// SignumFunc constructs a function that returns the sign of the given
-// number as -1, 0, or 1.
-var SignumFunc = function.New(&function.Spec{
-	Params: []function.Parameter{
-		{
-			Name: "num",
-			Type: cty.Number,
-		},
-	},
-	Type: function.StaticReturnType(cty.Number),
-	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-		var num int
-		if err := gocty.FromCtyValue(args[0], &num); err != nil {
-			return cty.UnknownVal(cty.String), err
-		}
-		switch {
-		case num < 0:
-			return cty.NumberIntVal(-1), nil
-		case num > 0:
-			return cty.NumberIntVal(+1), nil
-		default:
-			return cty.NumberIntVal(0), nil
-		}
-	},
-})
-
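Editorial aside: a quick standalone check of the arithmetic behind the log and pow functions above (standard library only):

package main

import (
	"fmt"
	"math"
)

func main() {
	// log(num, base) is computed as ln(num)/ln(base):
	fmt.Println(math.Log(256) / math.Log(2)) // 8 (up to floating-point rounding)
	// pow(num, power) delegates to math.Pow:
	fmt.Println(math.Pow(3, 4)) // 81
}
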
-// ParseIntFunc constructs a function that parses a string argument and returns an integer of the specified base.
-var ParseIntFunc = function.New(&function.Spec{
-	Params: []function.Parameter{
-		{
-			Name: "number",
-			Type: cty.DynamicPseudoType,
-		},
-		{
-			Name: "base",
-			Type: cty.Number,
-		},
-	},
-
-	Type: func(args []cty.Value) (cty.Type, error) {
-		if !args[0].Type().Equals(cty.String) {
-			return cty.Number, function.NewArgErrorf(0, "first argument must be a string, not %s", args[0].Type().FriendlyName())
-		}
-		return cty.Number, nil
-	},
-
-	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-		var numstr string
-		var base int
-		var err error
-
-		if err = gocty.FromCtyValue(args[0], &numstr); err != nil {
-			return cty.UnknownVal(cty.String), function.NewArgError(0, err)
-		}
-
-		if err = gocty.FromCtyValue(args[1], &base); err != nil {
-			return cty.UnknownVal(cty.Number), function.NewArgError(1, err)
-		}
-
-		if base < 2 || base > 62 {
-			return cty.UnknownVal(cty.Number), function.NewArgErrorf(
-				1,
-				"base must be a whole number between 2 and 62 inclusive",
-			)
-		}
-
-		num, ok := (&big.Int{}).SetString(numstr, base)
-		if !ok {
-			return cty.UnknownVal(cty.Number), function.NewArgErrorf(
-				0,
-				"cannot parse %q as a base %d integer",
-				numstr,
-				base,
-			)
-		}
-
-		parsedNum := cty.NumberVal((&big.Float{}).SetInt(num))
-
-		return parsedNum, nil
-	},
-})
-
-// Log returns the logarithm of a given number in a given base.
-func Log(num, base cty.Value) (cty.Value, error) {
-	return LogFunc.Call([]cty.Value{num, base})
-}
-
-// Pow returns the given number raised to the given power.
-func Pow(num, power cty.Value) (cty.Value, error) {
-	return PowFunc.Call([]cty.Value{num, power})
-}
-
-// Signum determines the sign of a number, returning a number between -1 and
-// 1 to represent the sign.
-func Signum(num cty.Value) (cty.Value, error) {
-	return SignumFunc.Call([]cty.Value{num})
-}
-
-// ParseInt parses a string argument and returns an integer of the specified base.
-func ParseInt(num cty.Value, base cty.Value) (cty.Value, error) {
-	return ParseIntFunc.Call([]cty.Value{num, base})
-}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/string.go b/vendor/github.com/hashicorp/terraform/lang/funcs/string.go
deleted file mode 100644
index ab6da727..00000000
--- a/vendor/github.com/hashicorp/terraform/lang/funcs/string.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package funcs
-
-import (
-	"regexp"
-	"strings"
-
-	"github.com/zclconf/go-cty/cty"
-	"github.com/zclconf/go-cty/cty/function"
-)
-
-// ReplaceFunc constructs a function that searches a given string for another
-// given substring, and replaces each occurrence with a given replacement string.
-var ReplaceFunc = function.New(&function.Spec{
-	Params: []function.Parameter{
-		{
-			Name: "str",
-			Type: cty.String,
-		},
-		{
-			Name: "substr",
-			Type: cty.String,
-		},
-		{
-			Name: "replace",
-			Type: cty.String,
-		},
-	},
-	Type: function.StaticReturnType(cty.String),
-	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-		str := args[0].AsString()
-		substr := args[1].AsString()
-		replace := args[2].AsString()
-
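		// (Editorial note, not part of the original source: under the
		// convention implemented below, replace("hello world", "/l+/", "L")
		// compiles "l+" as a regular expression and yields "heLo worLd",
		// while a plain substring such as "l" falls through to
		// strings.Replace and yields "heLLo worLd".)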
-		// We search/replace using a regexp if the string is surrounded
-		// in forward slashes.
-		if len(substr) > 1 && substr[0] == '/' && substr[len(substr)-1] == '/' {
-			re, err := regexp.Compile(substr[1 : len(substr)-1])
-			if err != nil {
-				return cty.UnknownVal(cty.String), err
-			}
-
-			return cty.StringVal(re.ReplaceAllString(str, replace)), nil
-		}
-
-		return cty.StringVal(strings.Replace(str, substr, replace, -1)), nil
-	},
-})
-
-// Replace searches a given string for another given substring,
-// and replaces all occurrences with a given replacement string.
-func Replace(str, substr, replace cty.Value) (cty.Value, error) {
-	return ReplaceFunc.Call([]cty.Value{str, substr, replace})
-}
diff --git a/vendor/github.com/hashicorp/terraform/lang/functions.go b/vendor/github.com/hashicorp/terraform/lang/functions.go
deleted file mode 100644
index b4cc2d72..00000000
--- a/vendor/github.com/hashicorp/terraform/lang/functions.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package lang
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/hcl/v2/ext/tryfunc"
-	ctyyaml "github.com/zclconf/go-cty-yaml"
-	"github.com/zclconf/go-cty/cty"
-	"github.com/zclconf/go-cty/cty/function"
-	"github.com/zclconf/go-cty/cty/function/stdlib"
-
-	"github.com/hashicorp/terraform/lang/funcs"
-)
-
-var impureFunctions = []string{
-	"bcrypt",
-	"timestamp",
-	"uuid",
-}
-
-// Functions returns the set of functions that should be used when evaluating
-// expressions in the receiving scope.
-func (s *Scope) Functions() map[string]function.Function {
-	s.funcsLock.Lock()
-	if s.funcs == nil {
-		// Some of our functions are just directly the cty stdlib functions.
-		// Others are implemented in the subdirectory "funcs" here in this
-		// repository. New functions should generally start out their lives
-		// in the "funcs" directory and potentially graduate to cty stdlib
-		// later if the functionality seems to be something domain-agnostic
-		// that would be useful to all applications using cty functions.
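		// (Editorial note, not part of the original source: every entry below
		// is a cty function.Function, so a registered function is invoked as
		//
		//	f := s.Functions()["upper"]
		//	v, err := f.Call([]cty.Value{cty.StringVal("hi")}) // cty.StringVal("HI"), nil
		//
		// with Call validating the arguments against Params before Impl runs.)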
- - s.funcs = map[string]function.Function{ - "abs": stdlib.AbsoluteFunc, - "abspath": funcs.AbsPathFunc, - "basename": funcs.BasenameFunc, - "base64decode": funcs.Base64DecodeFunc, - "base64encode": funcs.Base64EncodeFunc, - "base64gzip": funcs.Base64GzipFunc, - "base64sha256": funcs.Base64Sha256Func, - "base64sha512": funcs.Base64Sha512Func, - "bcrypt": funcs.BcryptFunc, - "can": tryfunc.CanFunc, - "ceil": stdlib.CeilFunc, - "chomp": stdlib.ChompFunc, - "cidrhost": funcs.CidrHostFunc, - "cidrnetmask": funcs.CidrNetmaskFunc, - "cidrsubnet": funcs.CidrSubnetFunc, - "cidrsubnets": funcs.CidrSubnetsFunc, - "coalesce": funcs.CoalesceFunc, - "coalescelist": stdlib.CoalesceListFunc, - "compact": stdlib.CompactFunc, - "concat": stdlib.ConcatFunc, - "contains": stdlib.ContainsFunc, - "csvdecode": stdlib.CSVDecodeFunc, - "dirname": funcs.DirnameFunc, - "distinct": stdlib.DistinctFunc, - "element": stdlib.ElementFunc, - "chunklist": stdlib.ChunklistFunc, - "file": funcs.MakeFileFunc(s.BaseDir, false), - "fileexists": funcs.MakeFileExistsFunc(s.BaseDir), - "fileset": funcs.MakeFileSetFunc(s.BaseDir), - "filebase64": funcs.MakeFileFunc(s.BaseDir, true), - "filebase64sha256": funcs.MakeFileBase64Sha256Func(s.BaseDir), - "filebase64sha512": funcs.MakeFileBase64Sha512Func(s.BaseDir), - "filemd5": funcs.MakeFileMd5Func(s.BaseDir), - "filesha1": funcs.MakeFileSha1Func(s.BaseDir), - "filesha256": funcs.MakeFileSha256Func(s.BaseDir), - "filesha512": funcs.MakeFileSha512Func(s.BaseDir), - "flatten": stdlib.FlattenFunc, - "floor": stdlib.FloorFunc, - "format": stdlib.FormatFunc, - "formatdate": stdlib.FormatDateFunc, - "formatlist": stdlib.FormatListFunc, - "indent": stdlib.IndentFunc, - "index": funcs.IndexFunc, // stdlib.IndexFunc is not compatible - "join": stdlib.JoinFunc, - "jsondecode": stdlib.JSONDecodeFunc, - "jsonencode": stdlib.JSONEncodeFunc, - "keys": stdlib.KeysFunc, - "length": funcs.LengthFunc, - "list": funcs.ListFunc, - "log": stdlib.LogFunc, - "lookup": funcs.LookupFunc, - "lower": stdlib.LowerFunc, - "map": funcs.MapFunc, - "matchkeys": funcs.MatchkeysFunc, - "max": stdlib.MaxFunc, - "md5": funcs.Md5Func, - "merge": stdlib.MergeFunc, - "min": stdlib.MinFunc, - "parseint": stdlib.ParseIntFunc, - "pathexpand": funcs.PathExpandFunc, - "pow": stdlib.PowFunc, - "range": stdlib.RangeFunc, - "regex": stdlib.RegexFunc, - "regexall": stdlib.RegexAllFunc, - "replace": funcs.ReplaceFunc, - "reverse": stdlib.ReverseListFunc, - "rsadecrypt": funcs.RsaDecryptFunc, - "setintersection": stdlib.SetIntersectionFunc, - "setproduct": stdlib.SetProductFunc, - "setsubtract": stdlib.SetSubtractFunc, - "setunion": stdlib.SetUnionFunc, - "sha1": funcs.Sha1Func, - "sha256": funcs.Sha256Func, - "sha512": funcs.Sha512Func, - "signum": stdlib.SignumFunc, - "slice": stdlib.SliceFunc, - "sort": stdlib.SortFunc, - "split": stdlib.SplitFunc, - "strrev": stdlib.ReverseFunc, - "substr": stdlib.SubstrFunc, - "sum": funcs.SumFunc, - "timestamp": funcs.TimestampFunc, - "timeadd": stdlib.TimeAddFunc, - "title": stdlib.TitleFunc, - "tostring": funcs.MakeToFunc(cty.String), - "tonumber": funcs.MakeToFunc(cty.Number), - "tobool": funcs.MakeToFunc(cty.Bool), - "toset": funcs.MakeToFunc(cty.Set(cty.DynamicPseudoType)), - "tolist": funcs.MakeToFunc(cty.List(cty.DynamicPseudoType)), - "tomap": funcs.MakeToFunc(cty.Map(cty.DynamicPseudoType)), - "transpose": funcs.TransposeFunc, - "trim": stdlib.TrimFunc, - "trimprefix": stdlib.TrimPrefixFunc, - "trimspace": stdlib.TrimSpaceFunc, - "trimsuffix": stdlib.TrimSuffixFunc, - "try": 
tryfunc.TryFunc, - "upper": stdlib.UpperFunc, - "urlencode": funcs.URLEncodeFunc, - "uuid": funcs.UUIDFunc, - "uuidv5": funcs.UUIDV5Func, - "values": stdlib.ValuesFunc, - "yamldecode": ctyyaml.YAMLDecodeFunc, - "yamlencode": ctyyaml.YAMLEncodeFunc, - "zipmap": stdlib.ZipmapFunc, - } - - s.funcs["templatefile"] = funcs.MakeTemplateFileFunc(s.BaseDir, func() map[string]function.Function { - // The templatefile function prevents recursive calls to itself - // by copying this map and overwriting the "templatefile" entry. - return s.funcs - }) - - if s.PureOnly { - // Force our few impure functions to return unknown so that we - // can defer evaluating them until a later pass. - for _, name := range impureFunctions { - s.funcs[name] = function.Unpredictable(s.funcs[name]) - } - } - } - s.funcsLock.Unlock() - - return s.funcs -} - -var unimplFunc = function.New(&function.Spec{ - Type: func([]cty.Value) (cty.Type, error) { - return cty.DynamicPseudoType, fmt.Errorf("function not yet implemented") - }, - Impl: func([]cty.Value, cty.Type) (cty.Value, error) { - return cty.DynamicVal, fmt.Errorf("function not yet implemented") - }, -}) diff --git a/vendor/github.com/hashicorp/terraform/lang/references.go b/vendor/github.com/hashicorp/terraform/lang/references.go deleted file mode 100644 index 569251cb..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/references.go +++ /dev/null @@ -1,81 +0,0 @@ -package lang - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/lang/blocktoattr" - "github.com/hashicorp/terraform/tfdiags" -) - -// References finds all of the references in the given set of traversals, -// returning diagnostics if any of the traversals cannot be interpreted as a -// reference. -// -// This function does not do any de-duplication of references, since references -// have source location information embedded in them and so any invalid -// references that are duplicated should have errors reported for each -// occurence. -// -// If the returned diagnostics contains errors then the result may be -// incomplete or invalid. Otherwise, the returned slice has one reference per -// given traversal, though it is not guaranteed that the references will -// appear in the same order as the given traversals. -func References(traversals []hcl.Traversal) ([]*addrs.Reference, tfdiags.Diagnostics) { - if len(traversals) == 0 { - return nil, nil - } - - var diags tfdiags.Diagnostics - refs := make([]*addrs.Reference, 0, len(traversals)) - - for _, traversal := range traversals { - ref, refDiags := addrs.ParseRef(traversal) - diags = diags.Append(refDiags) - if ref == nil { - continue - } - refs = append(refs, ref) - } - - return refs, diags -} - -// ReferencesInBlock is a helper wrapper around References that first searches -// the given body for traversals, before converting those traversals to -// references. -// -// A block schema must be provided so that this function can determine where in -// the body variables are expected. 
-func ReferencesInBlock(body hcl.Body, schema *configschema.Block) ([]*addrs.Reference, tfdiags.Diagnostics) { - if body == nil { - return nil, nil - } - - // We use blocktoattr.ExpandedVariables instead of hcldec.Variables or - // dynblock.VariablesHCLDec here because when we evaluate a block we'll - // first apply the dynamic block extension and _then_ the blocktoattr - // transform, and so blocktoattr.ExpandedVariables takes into account - // both of those transforms when it analyzes the body to ensure we find - // all of the references as if they'd already moved into their final - // locations, even though we can't expand dynamic blocks yet until we - // already know which variables are required. - // - // The set of cases we want to detect here is covered by the tests for - // the plan graph builder in the main 'terraform' package, since it's - // in a better position to test this due to having mock providers etc - // available. - traversals := blocktoattr.ExpandedVariables(body, schema) - return References(traversals) -} - -// ReferencesInExpr is a helper wrapper around References that first searches -// the given expression for traversals, before converting those traversals -// to references. -func ReferencesInExpr(expr hcl.Expression) ([]*addrs.Reference, tfdiags.Diagnostics) { - if expr == nil { - return nil, nil - } - traversals := expr.Variables() - return References(traversals) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/scope.go b/vendor/github.com/hashicorp/terraform/lang/scope.go deleted file mode 100644 index 98fca6ba..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/scope.go +++ /dev/null @@ -1,34 +0,0 @@ -package lang - -import ( - "sync" - - "github.com/zclconf/go-cty/cty/function" - - "github.com/hashicorp/terraform/addrs" -) - -// Scope is the main type in this package, allowing dynamic evaluation of -// blocks and expressions based on some contextual information that informs -// which variables and functions will be available. -type Scope struct { - // Data is used to resolve references in expressions. - Data Data - - // SelfAddr is the address that the "self" object should be an alias of, - // or nil if the "self" object should not be available at all. - SelfAddr addrs.Referenceable - - // BaseDir is the base directory used by any interpolation functions that - // accept filesystem paths as arguments. - BaseDir string - - // PureOnly can be set to true to request that any non-pure functions - // produce unknown value results rather than actually executing. This is - // important during a plan phase to avoid generating results that could - // then differ during apply. - PureOnly bool - - funcs map[string]function.Function - funcsLock sync.Mutex -} diff --git a/vendor/github.com/hashicorp/terraform/plans/action.go b/vendor/github.com/hashicorp/terraform/plans/action.go deleted file mode 100644 index c653b106..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/action.go +++ /dev/null @@ -1,22 +0,0 @@ -package plans - -type Action rune - -const ( - NoOp Action = 0 - Create Action = '+' - Read Action = '←' - Update Action = '~' - DeleteThenCreate Action = '∓' - CreateThenDelete Action = '±' - Delete Action = '-' -) - -//go:generate go run golang.org/x/tools/cmd/stringer -type Action - -// IsReplace returns true if the action is one of the two actions that -// represents replacing an existing object with a new object: -// DeleteThenCreate or CreateThenDelete. 
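Editorial aside: a self-contained mirror of the rune-valued Action constants and the IsReplace predicate described above:

package main

import "fmt"

type Action rune

const (
	Create           Action = '+'
	Delete           Action = '-'
	DeleteThenCreate Action = '∓'
	CreateThenDelete Action = '±'
)

func (a Action) IsReplace() bool {
	return a == DeleteThenCreate || a == CreateThenDelete
}

func main() {
	fmt.Println(Create.IsReplace())           // false
	fmt.Println(DeleteThenCreate.IsReplace()) // true
}
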
-func (a Action) IsReplace() bool { - return a == DeleteThenCreate || a == CreateThenDelete -} diff --git a/vendor/github.com/hashicorp/terraform/plans/action_string.go b/vendor/github.com/hashicorp/terraform/plans/action_string.go deleted file mode 100644 index be43ab17..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/action_string.go +++ /dev/null @@ -1,49 +0,0 @@ -// Code generated by "stringer -type Action"; DO NOT EDIT. - -package plans - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[NoOp-0] - _ = x[Create-43] - _ = x[Read-8592] - _ = x[Update-126] - _ = x[DeleteThenCreate-8723] - _ = x[CreateThenDelete-177] - _ = x[Delete-45] -} - -const ( - _Action_name_0 = "NoOp" - _Action_name_1 = "Create" - _Action_name_2 = "Delete" - _Action_name_3 = "Update" - _Action_name_4 = "CreateThenDelete" - _Action_name_5 = "Read" - _Action_name_6 = "DeleteThenCreate" -) - -func (i Action) String() string { - switch { - case i == 0: - return _Action_name_0 - case i == 43: - return _Action_name_1 - case i == 45: - return _Action_name_2 - case i == 126: - return _Action_name_3 - case i == 177: - return _Action_name_4 - case i == 8592: - return _Action_name_5 - case i == 8723: - return _Action_name_6 - default: - return "Action(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform/plans/changes.go b/vendor/github.com/hashicorp/terraform/plans/changes.go deleted file mode 100644 index 9e8f25ba..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/changes.go +++ /dev/null @@ -1,354 +0,0 @@ -package plans - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/states" - "github.com/zclconf/go-cty/cty" -) - -// Changes describes various actions that Terraform will attempt to take if -// the corresponding plan is applied. -// -// A Changes object can be rendered into a visual diff (by the caller, using -// code in another package) for display to the user. -type Changes struct { - // Resources tracks planned changes to resource instance objects. - Resources []*ResourceInstanceChangeSrc - - // Outputs tracks planned changes output values. - // - // Note that although an in-memory plan contains planned changes for - // outputs throughout the configuration, a plan serialized - // to disk retains only the root outputs because they are - // externally-visible, while other outputs are implementation details and - // can be easily re-calculated during the apply phase. Therefore only root - // module outputs will survive a round-trip through a plan file. - Outputs []*OutputChangeSrc -} - -// NewChanges returns a valid Changes object that describes no changes. -func NewChanges() *Changes { - return &Changes{} -} - -func (c *Changes) Empty() bool { - for _, res := range c.Resources { - if res.Action != NoOp { - return false - } - } - return true -} - -// ResourceInstance returns the planned change for the current object of the -// resource instance of the given address, if any. Returns nil if no change is -// planned. 
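Editorial aside: a reduced sketch of the Empty logic shown above: a plan is considered empty when every planned resource change is a NoOp (types simplified for illustration):

package main

import "fmt"

type Action rune

const (
	NoOp   Action = 0
	Update Action = '~'
)

type ResourceChange struct{ Action Action }

type Changes struct{ Resources []*ResourceChange }

// Empty mirrors the loop above: no non-NoOp resource changes means
// the plan is empty.
func (c *Changes) Empty() bool {
	for _, res := range c.Resources {
		if res.Action != NoOp {
			return false
		}
	}
	return true
}

func main() {
	c := &Changes{Resources: []*ResourceChange{{Action: NoOp}}}
	fmt.Println(c.Empty()) // true
	c.Resources = append(c.Resources, &ResourceChange{Action: Update})
	fmt.Println(c.Empty()) // false
}
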
-func (c *Changes) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstanceChangeSrc { - addrStr := addr.String() - for _, rc := range c.Resources { - if rc.Addr.String() == addrStr && rc.DeposedKey == states.NotDeposed { - return rc - } - } - - return nil - -} - -// InstancesForConfigResource returns the planned change for the current objects -// of the resource instances of the given address, if any. Returns nil if no -// changes are planned. -func (c *Changes) InstancesForConfigResource(addr addrs.ConfigResource) []*ResourceInstanceChangeSrc { - var changes []*ResourceInstanceChangeSrc - for _, rc := range c.Resources { - resAddr := rc.Addr.ContainingResource().Config() - if resAddr.Equal(addr) && rc.DeposedKey == states.NotDeposed { - changes = append(changes, rc) - } - } - - return changes -} - -// ResourceInstanceDeposed returns the plan change of a deposed object of -// the resource instance of the given address, if any. Returns nil if no change -// is planned. -func (c *Changes) ResourceInstanceDeposed(addr addrs.AbsResourceInstance, key states.DeposedKey) *ResourceInstanceChangeSrc { - addrStr := addr.String() - for _, rc := range c.Resources { - if rc.Addr.String() == addrStr && rc.DeposedKey == key { - return rc - } - } - - return nil -} - -// OutputValue returns the planned change for the output value with the -// given address, if any. Returns nil if no change is planned. -func (c *Changes) OutputValue(addr addrs.AbsOutputValue) *OutputChangeSrc { - addrStr := addr.String() - for _, oc := range c.Outputs { - if oc.Addr.String() == addrStr { - return oc - } - } - - return nil -} - -// OutputValues returns planned changes for all outputs for all module -// instances that reside in the parent path. Returns nil if no changes are -// planned. -func (c *Changes) OutputValues(parent addrs.ModuleInstance, module addrs.ModuleCall) []*OutputChangeSrc { - var res []*OutputChangeSrc - - for _, oc := range c.Outputs { - // we can't evaluate root module outputs - if oc.Addr.Module.Equal(addrs.RootModuleInstance) { - continue - } - - changeMod, changeCall := oc.Addr.Module.Call() - // this does not reside on our parent instance path - if !changeMod.Equal(parent) { - continue - } - - // this is not the module you're looking for - if changeCall.Name != module.Name { - continue - } - - res = append(res, oc) - - } - - return res -} - -// SyncWrapper returns a wrapper object around the receiver that can be used -// to make certain changes to the receiver in a concurrency-safe way, as long -// as all callers share the same wrapper object. -func (c *Changes) SyncWrapper() *ChangesSync { - return &ChangesSync{ - changes: c, - } -} - -// ResourceInstanceChange describes a change to a particular resource instance -// object. -type ResourceInstanceChange struct { - // Addr is the absolute address of the resource instance that the change - // will apply to. - Addr addrs.AbsResourceInstance - - // DeposedKey is the identifier for a deposed object associated with the - // given instance, or states.NotDeposed if this change applies to the - // current object. - // - // A Replace change for a resource with create_before_destroy set will - // create a new DeposedKey temporarily during replacement. In that case, - // DeposedKey in the plan is always states.NotDeposed, representing that - // the current object is being replaced with the deposed. 
- DeposedKey states.DeposedKey - - // Provider is the address of the provider configuration that was used - // to plan this change, and thus the configuration that must also be - // used to apply it. - ProviderAddr addrs.AbsProviderConfig - - // Change is an embedded description of the change. - Change - - // RequiredReplace is a set of paths that caused the change action to be - // Replace rather than Update. Always nil if the change action is not - // Replace. - // - // This is retained only for UI-plan-rendering purposes and so it does not - // currently survive a round-trip through a saved plan file. - RequiredReplace cty.PathSet - - // Private allows a provider to stash any extra data that is opaque to - // Terraform that relates to this change. Terraform will save this - // byte-for-byte and return it to the provider in the apply call. - Private []byte -} - -// Encode produces a variant of the reciever that has its change values -// serialized so it can be written to a plan file. Pass the implied type of the -// corresponding resource type schema for correct operation. -func (rc *ResourceInstanceChange) Encode(ty cty.Type) (*ResourceInstanceChangeSrc, error) { - cs, err := rc.Change.Encode(ty) - if err != nil { - return nil, err - } - return &ResourceInstanceChangeSrc{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - ProviderAddr: rc.ProviderAddr, - ChangeSrc: *cs, - RequiredReplace: rc.RequiredReplace, - Private: rc.Private, - }, err -} - -// Simplify will, where possible, produce a change with a simpler action than -// the receiever given a flag indicating whether the caller is dealing with -// a normal apply or a destroy. This flag deals with the fact that Terraform -// Core uses a specialized graph node type for destroying; only that -// specialized node should set "destroying" to true. -// -// The following table shows the simplification behavior: -// -// Action Destroying? New Action -// --------+-------------+----------- -// Create true NoOp -// Delete false NoOp -// Replace true Delete -// Replace false Create -// -// For any combination not in the above table, the Simplify just returns the -// receiver as-is. -func (rc *ResourceInstanceChange) Simplify(destroying bool) *ResourceInstanceChange { - if destroying { - switch rc.Action { - case Delete: - // We'll fall out and just return rc verbatim, then. - case CreateThenDelete, DeleteThenCreate: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: Delete, - Before: rc.Before, - After: cty.NullVal(rc.Before.Type()), - }, - } - default: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: NoOp, - Before: rc.Before, - After: rc.Before, - }, - } - } - } else { - switch rc.Action { - case Delete: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: NoOp, - Before: rc.Before, - After: rc.Before, - }, - } - case CreateThenDelete, DeleteThenCreate: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: Create, - Before: cty.NullVal(rc.After.Type()), - After: rc.After, - }, - } - } - } - - // If we fall out here then our change is already simple enough. 
- return rc -} - -// OutputChange describes a change to an output value. -type OutputChange struct { - // Addr is the absolute address of the output value that the change - // will apply to. - Addr addrs.AbsOutputValue - - // Change is an embedded description of the change. - // - // For output value changes, the type constraint for the DynamicValue - // instances is always cty.DynamicPseudoType. - Change - - // Sensitive, if true, indicates that either the old or new value in the - // change is sensitive and so a rendered version of the plan in the UI - // should elide the actual values while still indicating the action of the - // change. - Sensitive bool -} - -// Encode produces a variant of the reciever that has its change values -// serialized so it can be written to a plan file. -func (oc *OutputChange) Encode() (*OutputChangeSrc, error) { - cs, err := oc.Change.Encode(cty.DynamicPseudoType) - if err != nil { - return nil, err - } - return &OutputChangeSrc{ - Addr: oc.Addr, - ChangeSrc: *cs, - Sensitive: oc.Sensitive, - }, err -} - -// Change describes a single change with a given action. -type Change struct { - // Action defines what kind of change is being made. - Action Action - - // Interpretation of Before and After depend on Action: - // - // NoOp Before and After are the same, unchanged value - // Create Before is nil, and After is the expected value after create. - // Read Before is any prior value (nil if no prior), and After is the - // value that was or will be read. - // Update Before is the value prior to update, and After is the expected - // value after update. - // Replace As with Update. - // Delete Before is the value prior to delete, and After is always nil. - // - // Unknown values may appear anywhere within the Before and After values, - // either as the values themselves or as nested elements within known - // collections/structures. - Before, After cty.Value -} - -// Encode produces a variant of the reciever that has its change values -// serialized so it can be written to a plan file. Pass the type constraint -// that the values are expected to conform to; to properly decode the values -// later an identical type constraint must be provided at that time. -// -// Where a Change is embedded in some other struct, it's generally better -// to call the corresponding Encode method of that struct rather than working -// directly with its embedded Change. -func (c *Change) Encode(ty cty.Type) (*ChangeSrc, error) { - beforeDV, err := NewDynamicValue(c.Before, ty) - if err != nil { - return nil, err - } - afterDV, err := NewDynamicValue(c.After, ty) - if err != nil { - return nil, err - } - - return &ChangeSrc{ - Action: c.Action, - Before: beforeDV, - After: afterDV, - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_src.go b/vendor/github.com/hashicorp/terraform/plans/changes_src.go deleted file mode 100644 index 553a8408..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/changes_src.go +++ /dev/null @@ -1,190 +0,0 @@ -package plans - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/states" - "github.com/zclconf/go-cty/cty" -) - -// ResourceInstanceChangeSrc is a not-yet-decoded ResourceInstanceChange. -// Pass the associated resource type's schema type to method Decode to -// obtain a ResourceInstanceChange. -type ResourceInstanceChangeSrc struct { - // Addr is the absolute address of the resource instance that the change - // will apply to. 
-	Addr addrs.AbsResourceInstance
-
-	// DeposedKey is the identifier for a deposed object associated with the
-	// given instance, or states.NotDeposed if this change applies to the
-	// current object.
-	//
-	// A Replace change for a resource with create_before_destroy set will
-	// create a new DeposedKey temporarily during replacement. In that case,
-	// DeposedKey in the plan is always states.NotDeposed, representing that
-	// the current object is being replaced with the deposed.
-	DeposedKey states.DeposedKey
-
-	// Provider is the address of the provider configuration that was used
-	// to plan this change, and thus the configuration that must also be
-	// used to apply it.
-	ProviderAddr addrs.AbsProviderConfig
-
-	// ChangeSrc is an embedded description of the not-yet-decoded change.
-	ChangeSrc
-
-	// RequiredReplace is a set of paths that caused the change action to be
-	// Replace rather than Update. Always nil if the change action is not
-	// Replace.
-	//
-	// This is retained only for UI-plan-rendering purposes and so it does not
-	// currently survive a round-trip through a saved plan file.
-	RequiredReplace cty.PathSet
-
-	// Private allows a provider to stash any extra data that is opaque to
-	// Terraform that relates to this change. Terraform will save this
-	// byte-for-byte and return it to the provider in the apply call.
-	Private []byte
-}
-
-// Decode unmarshals the raw representation of the instance object being
-// changed. Pass the implied type of the corresponding resource type schema
-// for correct operation.
-func (rcs *ResourceInstanceChangeSrc) Decode(ty cty.Type) (*ResourceInstanceChange, error) {
-	change, err := rcs.ChangeSrc.Decode(ty)
-	if err != nil {
-		return nil, err
-	}
-	return &ResourceInstanceChange{
-		Addr:            rcs.Addr,
-		DeposedKey:      rcs.DeposedKey,
-		ProviderAddr:    rcs.ProviderAddr,
-		Change:          *change,
-		RequiredReplace: rcs.RequiredReplace,
-		Private:         rcs.Private,
-	}, nil
-}
-
-// DeepCopy creates a copy of the receiver where any pointers to nested mutable
-// values are also copied, thus ensuring that future mutations of the receiver
-// will not affect the copy.
-//
-// Some types used within a resource change are immutable by convention even
-// though the Go language allows them to be mutated, such as the types from
-// the addrs package. These are _not_ copied by this method, under the
-// assumption that callers will behave themselves.
-func (rcs *ResourceInstanceChangeSrc) DeepCopy() *ResourceInstanceChangeSrc {
-	if rcs == nil {
-		return nil
-	}
-	ret := *rcs
-
-	ret.RequiredReplace = cty.NewPathSet(ret.RequiredReplace.List()...)
-
-	if len(ret.Private) != 0 {
-		private := make([]byte, len(ret.Private))
-		copy(private, ret.Private)
-		ret.Private = private
-	}
-
-	ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy()
-	ret.ChangeSrc.After = ret.ChangeSrc.After.Copy()
-
-	return &ret
-}
-
-// OutputChangeSrc describes a change to an output value.
-type OutputChangeSrc struct {
-	// Addr is the absolute address of the output value that the change
-	// will apply to.
-	Addr addrs.AbsOutputValue
-
-	// ChangeSrc is an embedded description of the not-yet-decoded change.
-	//
-	// For output value changes, the type constraint for the DynamicValue
-	// instances is always cty.DynamicPseudoType.
-	ChangeSrc
-
-	// Sensitive, if true, indicates that either the old or new value in the
-	// change is sensitive and so a rendered version of the plan in the UI
-	// should elide the actual values while still indicating the action of the
-	// change.
-	Sensitive bool
-}
-
-// Decode unmarshals the raw representation of the output value being
-// changed.
-func (ocs *OutputChangeSrc) Decode() (*OutputChange, error) {
-	change, err := ocs.ChangeSrc.Decode(cty.DynamicPseudoType)
-	if err != nil {
-		return nil, err
-	}
-	return &OutputChange{
-		Addr:      ocs.Addr,
-		Change:    *change,
-		Sensitive: ocs.Sensitive,
-	}, nil
-}
-
-// DeepCopy creates a copy of the receiver where any pointers to nested mutable
-// values are also copied, thus ensuring that future mutations of the receiver
-// will not affect the copy.
-//
-// Some types used within a resource change are immutable by convention even
-// though the Go language allows them to be mutated, such as the types from
-// the addrs package. These are _not_ copied by this method, under the
-// assumption that callers will behave themselves.
-func (ocs *OutputChangeSrc) DeepCopy() *OutputChangeSrc {
-	if ocs == nil {
-		return nil
-	}
-	ret := *ocs
-
-	ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy()
-	ret.ChangeSrc.After = ret.ChangeSrc.After.Copy()
-
-	return &ret
-}
-
-// ChangeSrc is a not-yet-decoded Change.
-type ChangeSrc struct {
-	// Action defines what kind of change is being made.
-	Action Action
-
-	// Before and After correspond to the fields of the same name in Change,
-	// but have not yet been decoded from the serialized value used for
-	// storage.
-	Before, After DynamicValue
-}
-
-// Decode unmarshals the raw representations of the before and after values
-// to produce a Change object. Pass the type constraint that the result must
-// conform to.
-//
-// Where a ChangeSrc is embedded in some other struct, it's generally better
-// to call the corresponding Decode method of that struct rather than working
-// directly with its embedded Change.
-func (cs *ChangeSrc) Decode(ty cty.Type) (*Change, error) {
-	var err error
-	before := cty.NullVal(ty)
-	after := cty.NullVal(ty)
-
-	if len(cs.Before) > 0 {
-		before, err = cs.Before.Decode(ty)
-		if err != nil {
-			return nil, fmt.Errorf("error decoding 'before' value: %s", err)
-		}
-	}
-	if len(cs.After) > 0 {
-		after, err = cs.After.Decode(ty)
-		if err != nil {
-			return nil, fmt.Errorf("error decoding 'after' value: %s", err)
-		}
-	}
-	return &Change{
-		Action: cs.Action,
-		Before: before,
-		After:  after,
-	}, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_state.go b/vendor/github.com/hashicorp/terraform/plans/changes_state.go
deleted file mode 100644
index 543e6c2b..00000000
--- a/vendor/github.com/hashicorp/terraform/plans/changes_state.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package plans
-
-import (
-	"github.com/hashicorp/terraform/states"
-)
-
-// PlannedState merges the set of changes described by the receiver into the
-// given prior state to produce the planned result state.
-//
-// The result is an approximation of the state as it would exist after
-// applying these changes, omitting any values that cannot be determined until
-// the changes are actually applied.
-func (c *Changes) PlannedState(prior *states.State) (*states.State, error) {
-	panic("Changes.PlannedState not yet implemented")
-}
diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_sync.go b/vendor/github.com/hashicorp/terraform/plans/changes_sync.go
deleted file mode 100644
index 615f8539..00000000
--- a/vendor/github.com/hashicorp/terraform/plans/changes_sync.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package plans
-
-import (
-	"fmt"
-	"sync"
-
-	"github.com/hashicorp/terraform/addrs"
-	"github.com/hashicorp/terraform/states"
-)
-
-// ChangesSync is a wrapper around a Changes that provides a concurrency-safe
-// interface to insert new changes and retrieve copies of existing changes.
-//
-// Each ChangesSync is independent of all others, so all concurrent writers
-// to a particular Changes must share a single ChangesSync. Behavior is
-// undefined if any other caller makes changes to the underlying Changes
-// object or its nested objects concurrently with any of the methods of a
-// particular ChangesSync.
-type ChangesSync struct {
-	lock    sync.Mutex
-	changes *Changes
-}
-
-// AppendResourceInstanceChange records the given resource instance change in
-// the set of planned resource changes.
-//
-// The caller must ensure that there are no concurrent writes to the given
-// change while this method is running, but it is safe to resume mutating
-// it after this method returns without affecting the saved change.
-func (cs *ChangesSync) AppendResourceInstanceChange(changeSrc *ResourceInstanceChangeSrc) {
-	if cs == nil {
-		panic("AppendResourceInstanceChange on nil ChangesSync")
-	}
-	cs.lock.Lock()
-	defer cs.lock.Unlock()
-
-	s := changeSrc.DeepCopy()
-	cs.changes.Resources = append(cs.changes.Resources, s)
-}
-
-// GetResourceInstanceChange searches the set of resource instance changes for
-// one matching the given address and generation, returning it if it exists.
-//
-// If no such change exists, nil is returned.
-//
-// The returned object is a deep copy of the change recorded in the plan, so
-// callers may mutate it although it's generally better (less confusing) to
-// treat planned changes as immutable after they've been initially constructed.
-func (cs *ChangesSync) GetResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) *ResourceInstanceChangeSrc {
-	if cs == nil {
-		panic("GetResourceInstanceChange on nil ChangesSync")
-	}
-	cs.lock.Lock()
-	defer cs.lock.Unlock()
-
-	if gen == states.CurrentGen {
-		return cs.changes.ResourceInstance(addr).DeepCopy()
-	}
-	if dk, ok := gen.(states.DeposedKey); ok {
-		return cs.changes.ResourceInstanceDeposed(addr, dk).DeepCopy()
-	}
-	panic(fmt.Sprintf("unsupported generation value %#v", gen))
-}
-
-// GetChangesForConfigResource searches the set of resource instance
-// changes and returns all changes related to a given configuration address.
-// This can be used to find possible changes related to a configuration
-// reference.
-//
-// If no such changes exist, nil is returned.
-//
-// The returned objects are a deep copy of the change recorded in the plan, so
-// callers may mutate them although it's generally better (less confusing) to
-// treat planned changes as immutable after they've been initially constructed.
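-//
-// A usage sketch (hypothetical caller, sync variable, and address; not part
-// of the original file):
-//
-//	for _, rc := range changesSync.GetChangesForConfigResource(configAddr) {
-//		log.Printf("planned %s for %s", rc.Action, rc.Addr)
-//	}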
-func (cs *ChangesSync) GetChangesForConfigResource(addr addrs.ConfigResource) []*ResourceInstanceChangeSrc {
-	if cs == nil {
-		panic("GetChangesForConfigResource on nil ChangesSync")
-	}
-	cs.lock.Lock()
-	defer cs.lock.Unlock()
-	var changes []*ResourceInstanceChangeSrc
-	for _, c := range cs.changes.InstancesForConfigResource(addr) {
-		changes = append(changes, c.DeepCopy())
-	}
-	return changes
-}
-
-// RemoveResourceInstanceChange searches the set of resource instance changes
-// for one matching the given address and generation, and removes it from the
-// set if it exists.
-func (cs *ChangesSync) RemoveResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) {
-	if cs == nil {
-		panic("RemoveResourceInstanceChange on nil ChangesSync")
-	}
-	cs.lock.Lock()
-	defer cs.lock.Unlock()
-
-	dk := states.NotDeposed
-	if realDK, ok := gen.(states.DeposedKey); ok {
-		dk = realDK
-	}
-
-	addrStr := addr.String()
-	for i, r := range cs.changes.Resources {
-		if r.Addr.String() != addrStr || r.DeposedKey != dk {
-			continue
-		}
-		copy(cs.changes.Resources[i:], cs.changes.Resources[i+1:])
-		cs.changes.Resources = cs.changes.Resources[:len(cs.changes.Resources)-1]
-		return
-	}
-}
-
-// AppendOutputChange records the given output value change in the set of
-// planned value changes.
-//
-// The caller must ensure that there are no concurrent writes to the given
-// change while this method is running, but it is safe to resume mutating
-// it after this method returns without affecting the saved change.
-func (cs *ChangesSync) AppendOutputChange(changeSrc *OutputChangeSrc) {
-	if cs == nil {
-		panic("AppendOutputChange on nil ChangesSync")
-	}
-	cs.lock.Lock()
-	defer cs.lock.Unlock()
-
-	s := changeSrc.DeepCopy()
-	cs.changes.Outputs = append(cs.changes.Outputs, s)
-}
-
-// GetOutputChange searches the set of output value changes for one matching
-// the given address, returning it if it exists.
-//
-// If no such change exists, nil is returned.
-//
-// The returned object is a deep copy of the change recorded in the plan, so
-// callers may mutate it although it's generally better (less confusing) to
-// treat planned changes as immutable after they've been initially constructed.
-func (cs *ChangesSync) GetOutputChange(addr addrs.AbsOutputValue) *OutputChangeSrc {
-	if cs == nil {
-		panic("GetOutputChange on nil ChangesSync")
-	}
-	cs.lock.Lock()
-	defer cs.lock.Unlock()
-
-	return cs.changes.OutputValue(addr)
-}
-
-// GetOutputChanges searches the set of output changes for any that reside in
-// module instances beneath the given module. If no changes exist, nil
-// is returned.
-//
-// The returned objects are a deep copy of the change recorded in the plan, so
-// callers may mutate them although it's generally better (less confusing) to
-// treat planned changes as immutable after they've been initially constructed.
-func (cs *ChangesSync) GetOutputChanges(parent addrs.ModuleInstance, module addrs.ModuleCall) []*OutputChangeSrc {
-	if cs == nil {
-		panic("GetOutputChanges on nil ChangesSync")
-	}
-	cs.lock.Lock()
-	defer cs.lock.Unlock()
-
-	return cs.changes.OutputValues(parent, module)
-}
-
-// RemoveOutputChange searches the set of output value changes for one matching
-// the given address, and removes it from the set if it exists.
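-//
-// A usage sketch (hypothetical caller and address, not part of the original
-// file):
-//
-//	changesSync.RemoveOutputChange(outputAddr)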
-func (cs *ChangesSync) RemoveOutputChange(addr addrs.AbsOutputValue) {
-	if cs == nil {
-		panic("RemoveOutputChange on nil ChangesSync")
-	}
-	cs.lock.Lock()
-	defer cs.lock.Unlock()
-
-	addrStr := addr.String()
-	for i, o := range cs.changes.Outputs {
-		if o.Addr.String() != addrStr {
-			continue
-		}
-		copy(cs.changes.Outputs[i:], cs.changes.Outputs[i+1:])
-		cs.changes.Outputs = cs.changes.Outputs[:len(cs.changes.Outputs)-1]
-		return
-	}
-}
diff --git a/vendor/github.com/hashicorp/terraform/plans/doc.go b/vendor/github.com/hashicorp/terraform/plans/doc.go
deleted file mode 100644
index 01ca3892..00000000
--- a/vendor/github.com/hashicorp/terraform/plans/doc.go
+++ /dev/null
@@ -1,5 +0,0 @@
-// Package plans contains the types that are used to represent Terraform plans.
-//
-// A plan describes a set of changes that Terraform will make to update remote
-// objects to match with changes to the configuration.
-package plans
diff --git a/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go b/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go
deleted file mode 100644
index 51fbb24c..00000000
--- a/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package plans
-
-import (
-	"github.com/zclconf/go-cty/cty"
-	ctymsgpack "github.com/zclconf/go-cty/cty/msgpack"
-)
-
-// DynamicValue is the representation in the plan of a value whose type cannot
-// be determined at compile time, such as because it comes from a schema
-// defined in a plugin.
-//
-// This type is used as an indirection so that the overall plan structure can
-// be decoded without schema available, and then the dynamic values accessed
-// at a later time once the appropriate schema has been determined.
-//
-// Internally, DynamicValue is a serialized version of a cty.Value created
-// against a particular type constraint. Callers should not directly access
-// the serialized form, whose format may change in future. Values of this
-// type must always be created by calling NewDynamicValue.
-//
-// The zero value of DynamicValue is nil, and represents the absence of a
-// value within the Go type system. This is distinct from a cty.NullVal
-// result, which represents the absence of a value within the cty type system.
-type DynamicValue []byte
-
-// NewDynamicValue creates a DynamicValue by serializing the given value
-// against the given type constraint. The value must conform to the type
-// constraint, or the result is undefined.
-//
-// If the value to be encoded has no predefined schema (for example, for
-// module output values and input variables), set the type constraint to
-// cty.DynamicPseudoType in order to save type information as part of the
-// value, and then also pass cty.DynamicPseudoType to method Decode to recover
-// the original value.
-//
-// cty.NilVal can be used to represent the absence of a value, but callers
-// must be careful to distinguish values that are absent at the Go layer
-// (cty.NilVal) vs. values that are absent at the cty layer (cty.NullVal
-// results).
-func NewDynamicValue(val cty.Value, ty cty.Type) (DynamicValue, error) {
-	// If we're given cty.NilVal (the zero value of cty.Value, which is
-	// distinct from a typed null value created by cty.NullVal) then we'll
-	// assume the caller is trying to represent the _absence_ of a value,
-	// and so we'll return a nil DynamicValue.
-	if val == cty.NilVal {
-		return DynamicValue(nil), nil
-	}
-
-	// Currently our internal encoding is msgpack, via ctymsgpack.
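-	// A round-trip therefore pairs NewDynamicValue(val, ty) with a later
-	// call to Decode using the same type constraint; the msgpack bytes in
-	// between are an internal detail that callers should never inspect.
-	// (This note is an editorial summary of the doc comments above, not a
-	// comment from the original file.)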
-	buf, err := ctymsgpack.Marshal(val, ty)
-	if err != nil {
-		return nil, err
-	}
-
-	return DynamicValue(buf), nil
-}
-
-// Decode retrieves the effective value from the receiver by interpreting the
-// serialized form against the given type constraint. For correct results,
-// the type constraint must match (or be consistent with) the one that was
-// used to create the receiver.
-//
-// A nil DynamicValue decodes to cty.NilVal, which is not a valid value and
-// instead represents the absence of a value.
-func (v DynamicValue) Decode(ty cty.Type) (cty.Value, error) {
-	if v == nil {
-		return cty.NilVal, nil
-	}
-
-	return ctymsgpack.Unmarshal([]byte(v), ty)
-}
-
-// ImpliedType returns the type implied by the serialized structure of the
-// receiving value.
-//
-// This will not necessarily be exactly the type that was given when the
-// value was encoded, and in particular must not be used for values that
-// were encoded with their static type given as cty.DynamicPseudoType.
-// It is however safe to use this method for values that were encoded using
-// their runtime type as the conforming type, with the result being
-// semantically equivalent but with all lists and sets represented as tuples,
-// and maps as objects, due to ambiguities of the serialization.
-func (v DynamicValue) ImpliedType() (cty.Type, error) {
-	return ctymsgpack.ImpliedType([]byte(v))
-}
-
-// Copy produces a copy of the receiver with a distinct backing array.
-func (v DynamicValue) Copy() DynamicValue {
-	if v == nil {
-		return nil
-	}
-
-	ret := make(DynamicValue, len(v))
-	copy(ret, v)
-	return ret
-}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/action.go b/vendor/github.com/hashicorp/terraform/plans/objchange/action.go
deleted file mode 100644
index 2ca32097..00000000
--- a/vendor/github.com/hashicorp/terraform/plans/objchange/action.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package objchange
-
-import (
-	"github.com/zclconf/go-cty/cty"
-
-	"github.com/hashicorp/terraform/plans"
-)
-
-// ActionForChange determines which plans.Action value best describes a
-// change from the value given in before to the value given in after.
-//
-// Because it has no context aside from the values, it can only return the
-// basic actions NoOp, Create, Update, and Delete. Other codepaths with
-// additional information might make this decision differently, such as by
-// using the Replace action instead of the Update action where that makes
-// sense.
-//
-// If the after value is unknown then the action can't be properly decided, and
-// so ActionForChange will conservatively return either Create or Update
-// depending on whether the before value is null. The before value must always
-// be fully known; ActionForChange will panic if it contains any unknown values.
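-//
-// For example (illustrative values, not part of the original file):
-//
-//	ActionForChange(cty.NullVal(cty.String), cty.StringVal("a"))  // plans.Create
-//	ActionForChange(cty.StringVal("a"), cty.StringVal("b"))       // plans.Update
-//	ActionForChange(cty.StringVal("a"), cty.NullVal(cty.String))  // plans.Delete
-//	ActionForChange(cty.StringVal("a"), cty.StringVal("a"))       // plans.NoOp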
-func ActionForChange(before, after cty.Value) plans.Action {
-	switch {
-	case !after.IsKnown():
-		if before.IsNull() {
-			return plans.Create
-		}
-		return plans.Update
-	case after.IsNull() && before.IsNull():
-		return plans.NoOp
-	case after.IsNull() && !before.IsNull():
-		return plans.Delete
-	case before.IsNull() && !after.IsNull():
-		return plans.Create
-	case after.RawEquals(before):
-		return plans.NoOp
-	default:
-		return plans.Update
-	}
-}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go b/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go
deleted file mode 100644
index 18a7e99a..00000000
--- a/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package objchange
-
-import (
-	"github.com/hashicorp/terraform/configs/configschema"
-	"github.com/zclconf/go-cty/cty"
-)
-
-// AllAttributesNull constructs a non-null cty.Value of the object type implied
-// by the given schema that has all of its leaf attributes set to null and all
-// of its nested block collections set to zero-length.
-//
-// This simulates what would result from decoding an empty configuration block
-// with the given schema, except that it does not produce errors
-func AllAttributesNull(schema *configschema.Block) cty.Value {
-	// "All attributes null" happens to be the definition of EmptyValue for
-	// a Block, so we can just delegate to that.
-	return schema.EmptyValue()
-}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go b/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go
deleted file mode 100644
index d85086c9..00000000
--- a/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go
+++ /dev/null
@@ -1,447 +0,0 @@
-package objchange
-
-import (
-	"fmt"
-	"strconv"
-
-	"github.com/zclconf/go-cty/cty"
-	"github.com/zclconf/go-cty/cty/convert"
-
-	"github.com/hashicorp/terraform/configs/configschema"
-)
-
-// AssertObjectCompatible checks whether the given "actual" value is a valid
-// completion of the possibly-partially-unknown "planned" value.
-//
-// This means that any known leaf value in "planned" must be equal to the
-// corresponding value in "actual", and various other similar constraints.
-//
-// Any inconsistencies are reported by returning a non-zero number of errors.
-// These errors are usually (but not necessarily) cty.PathError values
-// referring to a particular nested value within the "actual" value.
-//
-// The two values must have types that conform to the given schema's implied
-// type, or this function will panic.
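-//
-// A sketch of the intended call pattern (hypothetical variable names, not
-// part of the original file):
-//
-//	errs := AssertObjectCompatible(schema, plannedVal, appliedVal)
-//	for _, err := range errs {
-//		// each err is typically a cty.PathError pointing at the
-//		// inconsistent nested value, suitable for a provider bug report
-//	}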
-func AssertObjectCompatible(schema *configschema.Block, planned, actual cty.Value) []error {
-	return assertObjectCompatible(schema, planned, actual, nil)
-}
-
-func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Value, path cty.Path) []error {
-	var errs []error
-	if planned.IsNull() && !actual.IsNull() {
-		errs = append(errs, path.NewErrorf("was absent, but now present"))
-		return errs
-	}
-	if actual.IsNull() && !planned.IsNull() {
-		errs = append(errs, path.NewErrorf("was present, but now absent"))
-		return errs
-	}
-	if planned.IsNull() {
-		// No further checks possible if both values are null
-		return errs
-	}
-
-	for name, attrS := range schema.Attributes {
-		plannedV := planned.GetAttr(name)
-		actualV := actual.GetAttr(name)
-
-		path := append(path, cty.GetAttrStep{Name: name})
-		moreErrs := assertValueCompatible(plannedV, actualV, path)
-		if attrS.Sensitive {
-			if len(moreErrs) > 0 {
-				// Use a vague placeholder message instead, to avoid disclosing
-				// sensitive information.
-				errs = append(errs, path.NewErrorf("inconsistent values for sensitive attribute"))
-			}
-		} else {
-			errs = append(errs, moreErrs...)
-		}
-	}
-	for name, blockS := range schema.BlockTypes {
-		plannedV := planned.GetAttr(name)
-		actualV := actual.GetAttr(name)
-
-		// As a special case, if there were any blocks whose leaf attributes
-		// are all unknown then we assume (possibly incorrectly) that the
-		// HCL dynamic block extension is in use with an unknown for_each
-		// argument, and so we will do looser validation here that allows
-		// for those blocks to have expanded into a different number of blocks
-		// if the for_each value is now known.
-		maybeUnknownBlocks := couldHaveUnknownBlockPlaceholder(plannedV, blockS, false)
-
-		path := append(path, cty.GetAttrStep{Name: name})
-		switch blockS.Nesting {
-		case configschema.NestingSingle, configschema.NestingGroup:
-			// If an unknown block placeholder was present then the placeholder
-			// may have expanded out into zero blocks, which is okay.
-			if maybeUnknownBlocks && actualV.IsNull() {
-				continue
-			}
-			moreErrs := assertObjectCompatible(&blockS.Block, plannedV, actualV, path)
-			errs = append(errs, moreErrs...)
-		case configschema.NestingList:
-			// A NestingList might either be a list or a tuple, depending on
-			// whether there are dynamically-typed attributes inside. However,
-			// both support a similar-enough API that we can treat them the
-			// same for our purposes here.
-			if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
-				continue
-			}
-
-			if maybeUnknownBlocks {
-				// When unknown blocks are present the final blocks may be
-				// at different indices than the planned blocks, so unfortunately
-				// we can't do our usual checks in this case without generating
-				// false negatives.
-				continue
-			}
-
-			plannedL := plannedV.LengthInt()
-			actualL := actualV.LengthInt()
-			if plannedL != actualL {
-				errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL))
-				continue
-			}
-			for it := plannedV.ElementIterator(); it.Next(); {
-				idx, plannedEV := it.Element()
-				if !actualV.HasIndex(idx).True() {
-					continue
-				}
-				actualEV := actualV.Index(idx)
-				moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx}))
-				errs = append(errs, moreErrs...)
-			}
-		case configschema.NestingMap:
-			// A NestingMap might either be a map or an object, depending on
-			// whether there are dynamically-typed attributes inside, but
-			// that's decided statically and so both values will have the same
-			// kind.
-			if plannedV.Type().IsObjectType() {
-				plannedAtys := plannedV.Type().AttributeTypes()
-				actualAtys := actualV.Type().AttributeTypes()
-				for k := range plannedAtys {
-					if _, ok := actualAtys[k]; !ok {
-						errs = append(errs, path.NewErrorf("block key %q has vanished", k))
-						continue
-					}
-
-					plannedEV := plannedV.GetAttr(k)
-					actualEV := actualV.GetAttr(k)
-					moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.GetAttrStep{Name: k}))
-					errs = append(errs, moreErrs...)
-				}
-				if !maybeUnknownBlocks { // new blocks may appear if unknown blocks were present in the plan
-					for k := range actualAtys {
-						if _, ok := plannedAtys[k]; !ok {
-							errs = append(errs, path.NewErrorf("new block key %q has appeared", k))
-							continue
-						}
-					}
-				}
-			} else {
-				if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
-					continue
-				}
-				plannedL := plannedV.LengthInt()
-				actualL := actualV.LengthInt()
-				if plannedL != actualL && !maybeUnknownBlocks { // new blocks may appear if unknown blocks were present in the plan
-					errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL))
-					continue
-				}
-				for it := plannedV.ElementIterator(); it.Next(); {
-					idx, plannedEV := it.Element()
-					if !actualV.HasIndex(idx).True() {
-						continue
-					}
-					actualEV := actualV.Index(idx)
-					moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx}))
-					errs = append(errs, moreErrs...)
-				}
-			}
-		case configschema.NestingSet:
-			if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
-				continue
-			}
-
-			setErrs := assertSetValuesCompatible(plannedV, actualV, path, func(plannedEV, actualEV cty.Value) bool {
-				errs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: actualEV}))
-				return len(errs) == 0
-			})
-			errs = append(errs, setErrs...)
-
-			if maybeUnknownBlocks {
-				// When unknown blocks are present the final number of blocks
-				// may be different, either because the unknown set values
-				// become equal and are collapsed, or the count is unknown due
-				// to a dynamic block. Unfortunately this means we can't do our
-				// usual checks in this case without generating false
-				// negatives.
-				continue
-			}
-
-			// There can be fewer elements in a set after its elements are all
-			// known (values that turn out to be equal will coalesce) but the
-			// number of elements must never get larger.
-			plannedL := plannedV.LengthInt()
-			actualL := actualV.LengthInt()
-			if plannedL < actualL {
-				errs = append(errs, path.NewErrorf("block set length changed from %d to %d", plannedL, actualL))
-			}
-		default:
-			panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting))
-		}
-	}
-	return errs
-}
-
-func assertValueCompatible(planned, actual cty.Value, path cty.Path) []error {
-	// NOTE: We don't normally use the GoString rendering of cty.Value in
-	// user-facing error messages as a rule, but we make an exception
-	// for this function because we expect the user to pass this message on
-	// verbatim to the provider development team and so more detail is better.
-
-	var errs []error
-	if planned.Type() == cty.DynamicPseudoType {
-		// Anything goes, then
-		return errs
-	}
-	if problems := planned.Type().TestConformance(actual.Type()); len(problems) > 0 {
-		errs = append(errs, path.NewErrorf("wrong final value type: %s", convert.MismatchMessage(actual.Type(), planned.Type())))
-		// If the types don't match then we can't do any other comparisons,
-		// so we bail early.
-		return errs
-	}
-
-	if !planned.IsKnown() {
-		// We didn't know what we were going to end up with during plan, so
-		// anything goes during apply.
-		return errs
-	}
-
-	if actual.IsNull() {
-		if planned.IsNull() {
-			return nil
-		}
-		errs = append(errs, path.NewErrorf("was %#v, but now null", planned))
-		return errs
-	}
-	if planned.IsNull() {
-		errs = append(errs, path.NewErrorf("was null, but now %#v", actual))
-		return errs
-	}
-
-	ty := planned.Type()
-	switch {
-
-	case !actual.IsKnown():
-		errs = append(errs, path.NewErrorf("was known, but now unknown"))
-
-	case ty.IsPrimitiveType():
-		if !actual.Equals(planned).True() {
-			errs = append(errs, path.NewErrorf("was %#v, but now %#v", planned, actual))
-		}
-
-	case ty.IsListType() || ty.IsMapType() || ty.IsTupleType():
-		for it := planned.ElementIterator(); it.Next(); {
-			k, plannedV := it.Element()
-			if !actual.HasIndex(k).True() {
-				errs = append(errs, path.NewErrorf("element %s has vanished", indexStrForErrors(k)))
-				continue
-			}
-
-			actualV := actual.Index(k)
-			moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: k}))
-			errs = append(errs, moreErrs...)
-		}
-
-		for it := actual.ElementIterator(); it.Next(); {
-			k, _ := it.Element()
-			if !planned.HasIndex(k).True() {
-				errs = append(errs, path.NewErrorf("new element %s has appeared", indexStrForErrors(k)))
-			}
-		}
-
-	case ty.IsObjectType():
-		atys := ty.AttributeTypes()
-		for name := range atys {
-			// Because we already tested that the two values have the same type,
-			// we can assume that the same attributes are present in both and
-			// focus just on testing their values.
-			plannedV := planned.GetAttr(name)
-			actualV := actual.GetAttr(name)
-			moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.GetAttrStep{Name: name}))
-			errs = append(errs, moreErrs...)
-		}
-
-	case ty.IsSetType():
-		// We can't really do anything useful for sets here because changing
-		// an unknown element to known changes the identity of the element, and
-		// so we can't correlate them properly. However, we will at least check
-		// to ensure that the number of elements is consistent, along with
-		// the general type-match checks we ran earlier in this function.
-		if planned.IsKnown() && !planned.IsNull() && !actual.IsNull() {
-
-			setErrs := assertSetValuesCompatible(planned, actual, path, func(plannedV, actualV cty.Value) bool {
-				errs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: actualV}))
-				return len(errs) == 0
-			})
-			errs = append(errs, setErrs...)
-
-			// There can be fewer elements in a set after its elements are all
-			// known (values that turn out to be equal will coalesce) but the
-			// number of elements must never get larger.
-
-			plannedL := planned.LengthInt()
-			actualL := actual.LengthInt()
-			if plannedL < actualL {
-				errs = append(errs, path.NewErrorf("length changed from %d to %d", plannedL, actualL))
-			}
-		}
-	}
-
-	return errs
-}
-
-func indexStrForErrors(v cty.Value) string {
-	switch v.Type() {
-	case cty.Number:
-		return v.AsBigFloat().Text('f', -1)
-	case cty.String:
-		return strconv.Quote(v.AsString())
-	default:
-		// Should be impossible, since no other index types are allowed!
-		return fmt.Sprintf("%#v", v)
-	}
-}
-
-// couldHaveUnknownBlockPlaceholder is a heuristic that recognizes how the
-// HCL dynamic block extension behaves when it's asked to expand a block whose
-// for_each argument is unknown. In such cases, it generates a single placeholder
-// block with all leaf attribute values unknown, and once the for_each
-// expression becomes known the placeholder may be replaced with any number
-// of blocks, so object compatibility checks would need to be more liberal.
-//
-// Set "nested" if testing a block that is nested inside a candidate block
-// placeholder; this changes the interpretation of there being no blocks of
-// a type to allow for there being zero nested blocks.
-func couldHaveUnknownBlockPlaceholder(v cty.Value, blockS *configschema.NestedBlock, nested bool) bool {
-	switch blockS.Nesting {
-	case configschema.NestingSingle, configschema.NestingGroup:
-		if nested && v.IsNull() {
-			return true // for nested blocks, a single block being unset doesn't disqualify from being an unknown block placeholder
-		}
-		return couldBeUnknownBlockPlaceholderElement(v, &blockS.Block)
-	default:
-		// These situations should be impossible for correct providers, but
-		// we permit the legacy SDK to produce some incorrect outcomes
-		// for compatibility with its existing logic, and so we must be
-		// tolerant here.
-		if !v.IsKnown() {
-			return true
-		}
-		if v.IsNull() {
-			return false // treated as if the list were empty, so we would see zero iterations below
-		}
-
-		// For all other nesting modes, our value should be something iterable.
-		for it := v.ElementIterator(); it.Next(); {
-			_, ev := it.Element()
-			if couldBeUnknownBlockPlaceholderElement(ev, &blockS.Block) {
-				return true
-			}
-		}
-
-		// Our default changes depending on whether we're testing the candidate
-		// block itself or something nested inside of it: zero blocks of a type
-		// can never contain a dynamic block placeholder, but a dynamic block
-		// placeholder might contain zero blocks of one of its own nested block
-		// types, if none were set in the config at all.
-		return nested
-	}
-}
-
-func couldBeUnknownBlockPlaceholderElement(v cty.Value, schema *configschema.Block) bool {
-	if v.IsNull() {
-		return false // null value can never be a placeholder element
-	}
-	if !v.IsKnown() {
-		return true // this should never happen for well-behaved providers, but can happen with the legacy SDK opt-outs
-	}
-	for name := range schema.Attributes {
-		av := v.GetAttr(name)
-
-		// Unknown block placeholders contain only unknown or null attribute
-		// values, depending on whether or not a particular attribute was set
-		// explicitly inside the content block. Note that this is imprecise:
-		// non-placeholders can also match this, so this function can generate
-		// false positives.
-		if av.IsKnown() && !av.IsNull() {
-			return false
-		}
-	}
-	for name, blockS := range schema.BlockTypes {
-		if !couldHaveUnknownBlockPlaceholder(v.GetAttr(name), blockS, true) {
-			return false
-		}
-	}
-	return true
-}
-
-// assertSetValuesCompatible checks that each of the elements in a can
-// be correlated with at least one equivalent element in b and vice-versa,
-// using the given correlation function.
-//
-// This allows the number of elements in the sets to change as long as all
-// elements in both sets can be correlated, making this function safe to use
-// with sets that may contain unknown values as long as the unknown case is
-// addressed in some reasonable way in the callback function.
-//
-// The callback always receives values from set a as its first argument and
-// values from set b in its second argument, so it is safe to use with
-// non-commutative functions.
-//
-// As with assertValueCompatible, we assume that the target audience of error
-// messages here is a provider developer (via a bug report from a user) and so
-// we intentionally violate our usual rule of keeping cty implementation
-// details out of error messages.
-func assertSetValuesCompatible(planned, actual cty.Value, path cty.Path, f func(aVal, bVal cty.Value) bool) []error {
-	a := planned
-	b := actual
-
-	// Our methodology here is a little tricky, to deal with the fact that
-	// it's impossible to directly correlate two non-equal set elements because
-	// they don't have identities separate from their values.
-	// The approach is to count the number of equivalent elements each element
-	// of a has in b and vice-versa, and then return true only if each element
-	// in both sets has at least one equivalent.
-	as := a.AsValueSlice()
-	bs := b.AsValueSlice()
-	aeqs := make([]bool, len(as))
-	beqs := make([]bool, len(bs))
-	for ai, av := range as {
-		for bi, bv := range bs {
-			if f(av, bv) {
-				aeqs[ai] = true
-				beqs[bi] = true
-			}
-		}
-	}
-
-	var errs []error
-	for i, eq := range aeqs {
-		if !eq {
-			errs = append(errs, path.NewErrorf("planned set element %#v does not correlate with any element in actual", as[i]))
-		}
-	}
-	if len(errs) > 0 {
-		// Exit early since otherwise we're likely to generate duplicate
-		// error messages from the other perspective in the subsequent loop.
-		return errs
-	}
-	for i, eq := range beqs {
-		if !eq {
-			errs = append(errs, path.NewErrorf("actual set element %#v does not correlate with any element in plan", bs[i]))
-		}
-	}
-	return errs
-}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go b/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go
deleted file mode 100644
index 2c18a010..00000000
--- a/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go
+++ /dev/null
@@ -1,4 +0,0 @@
-// Package objchange deals with the business logic of taking a prior state
-// value and a config value and producing a proposed new merged value, along
-// with other related rules in this domain.
-package objchange
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go b/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go
deleted file mode 100644
index cbfefddd..00000000
--- a/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package objchange
-
-import (
-	"github.com/zclconf/go-cty/cty"
-)
-
-// LongestCommonSubsequence finds a sequence of values that are common to both
-// x and y, with the same relative ordering as in both collections. This result
-// is useful as a first step towards computing a diff showing added/removed
-// elements in a sequence.
-//
-// The approach used here is a "naive" one, assuming that both xs and ys will
-// generally be small in most reasonable Terraform configurations. For larger
-// lists the time/space usage may be sub-optimal.
-//
-// A pair of lists may have multiple longest common subsequences. In that
-// case, the one selected by this function is undefined.
-func LongestCommonSubsequence(xs, ys []cty.Value) []cty.Value {
-	if len(xs) == 0 || len(ys) == 0 {
-		return make([]cty.Value, 0)
-	}
-
-	c := make([]int, len(xs)*len(ys))
-	eqs := make([]bool, len(xs)*len(ys))
-	w := len(xs)
-
-	for y := 0; y < len(ys); y++ {
-		for x := 0; x < len(xs); x++ {
-			eqV := xs[x].Equals(ys[y])
-			eq := false
-			if eqV.IsKnown() && eqV.True() {
-				eq = true
-				eqs[(w*y)+x] = true // equality tests can be expensive, so cache it
-			}
-			if eq {
-				// Sequence gets one longer than for the cell at top left,
-				// since we'd append a new item to the sequence here.
-				if x == 0 || y == 0 {
-					c[(w*y)+x] = 1
-				} else {
-					c[(w*y)+x] = c[(w*(y-1))+(x-1)] + 1
-				}
-			} else {
-				// We follow the longest of the sequence above and the sequence
-				// to the left of us in the matrix.
-				l := 0
-				u := 0
-				if x > 0 {
-					l = c[(w*y)+(x-1)]
-				}
-				if y > 0 {
-					u = c[(w*(y-1))+x]
-				}
-				if l > u {
-					c[(w*y)+x] = l
-				} else {
-					c[(w*y)+x] = u
-				}
-			}
-		}
-	}
-
-	// The bottom right cell tells us how long our longest sequence will be
-	seq := make([]cty.Value, c[len(c)-1])
-
-	// Now we will walk back from the bottom right cell, finding again all
-	// of the equal pairs to construct our sequence.
-	x := len(xs) - 1
-	y := len(ys) - 1
-	i := len(seq) - 1
-
-	for x > -1 && y > -1 {
-		if eqs[(w*y)+x] {
-			// Add the value to our result list and then walk diagonally
-			// up and to the left.
-			seq[i] = xs[x]
-			x--
-			y--
-			i--
-		} else {
-			// Take the path with the greatest sequence length in the matrix.
-			l := 0
-			u := 0
-			if x > 0 {
-				l = c[(w*y)+(x-1)]
-			}
-			if y > 0 {
-				u = c[(w*(y-1))+x]
-			}
-			if l > u {
-				x--
-			} else {
-				y--
-			}
-		}
-	}
-
-	if i > -1 {
-		// should never happen if the matrix was constructed properly
-		panic("not enough elements in sequence")
-	}
-
-	return seq
-}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go b/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go
deleted file mode 100644
index c23f44da..00000000
--- a/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package objchange
-
-import (
-	"github.com/hashicorp/terraform/configs/configschema"
-	"github.com/zclconf/go-cty/cty"
-)
-
-// NormalizeObjectFromLegacySDK takes an object that may have been generated
-// by the legacy Terraform SDK (i.e. returned from a provider with the
-// LegacyTypeSystem opt-out set) and does its best to normalize it for the
-// assumptions we would normally enforce if the provider had not opted out.
-//
-// In particular, this function guarantees that a value representing a nested
-// block will never itself be unknown or null, instead representing that as
-// a non-null value that may contain null/unknown values.
-//
-// The input value must still conform to the implied type of the given schema,
-// or else this function may produce garbage results or panic. This is usually
-// okay because type consistency is enforced when deserializing the value
-// returned from the provider over the RPC wire protocol anyway.
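-//
-// For example (illustrative, not part of the original file): a legacy
-// provider may return a null value for a NestingList block, which this
-// function rewrites to an empty list of the block's implied type, and it
-// rewrites a wholly-unknown block to a known object whose leaf attributes
-// are unknown (see unknownBlockStub below).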
-func NormalizeObjectFromLegacySDK(val cty.Value, schema *configschema.Block) cty.Value {
-	if val == cty.NilVal || val.IsNull() {
-		// This should never happen in reasonable use, but we'll allow it
-		// and normalize to a null of the expected type rather than panicking
-		// below.
-		return cty.NullVal(schema.ImpliedType())
-	}
-
-	vals := make(map[string]cty.Value)
-	for name := range schema.Attributes {
-		// No normalization for attributes, since their being type-conformant
-		// is all that we require.
-		vals[name] = val.GetAttr(name)
-	}
-	for name, blockS := range schema.BlockTypes {
-		lv := val.GetAttr(name)
-
-		// Legacy SDK never generates dynamically-typed attributes and so our
-		// normalization code doesn't deal with them, but we need to make sure
-		// we still pass them through properly so that we don't interfere with
-		// objects generated by other SDKs.
-		if ty := blockS.Block.ImpliedType(); ty.HasDynamicTypes() {
-			vals[name] = lv
-			continue
-		}
-
-		switch blockS.Nesting {
-		case configschema.NestingSingle, configschema.NestingGroup:
-			if lv.IsKnown() {
-				if lv.IsNull() && blockS.Nesting == configschema.NestingGroup {
-					vals[name] = blockS.EmptyValue()
-				} else {
-					vals[name] = NormalizeObjectFromLegacySDK(lv, &blockS.Block)
-				}
-			} else {
-				vals[name] = unknownBlockStub(&blockS.Block)
-			}
-		case configschema.NestingList:
-			switch {
-			case !lv.IsKnown():
-				vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)})
-			case lv.IsNull() || lv.LengthInt() == 0:
-				vals[name] = cty.ListValEmpty(blockS.Block.ImpliedType())
-			default:
-				subVals := make([]cty.Value, 0, lv.LengthInt())
-				for it := lv.ElementIterator(); it.Next(); {
-					_, subVal := it.Element()
-					subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block))
-				}
-				vals[name] = cty.ListVal(subVals)
-			}
-		case configschema.NestingSet:
-			switch {
-			case !lv.IsKnown():
-				vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)})
-			case lv.IsNull() || lv.LengthInt() == 0:
-				vals[name] = cty.SetValEmpty(blockS.Block.ImpliedType())
-			default:
-				subVals := make([]cty.Value, 0, lv.LengthInt())
-				for it := lv.ElementIterator(); it.Next(); {
-					_, subVal := it.Element()
-					subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block))
-				}
-				vals[name] = cty.SetVal(subVals)
-			}
-		default:
-			// The legacy SDK doesn't support NestingMap, so we just assume
-			// maps are always okay. (If not, we would've detected and returned
-			// an error to the user before we got here.)
-			vals[name] = lv
-		}
-	}
-	return cty.ObjectVal(vals)
-}
-
-// unknownBlockStub constructs an object value that approximates an unknown
-// block by producing a known block object with all of its leaf attribute
-// values set to unknown.
-//
-// Blocks themselves cannot be unknown, so if the legacy SDK tries to return
-// such a thing, we'll use this result instead. This convention mimics how
-// the dynamic block feature deals with being asked to iterate over an unknown
-// value, because our value-checking functions already accept this convention
-// as a special case.
-func unknownBlockStub(schema *configschema.Block) cty.Value {
-	vals := make(map[string]cty.Value)
-	for name, attrS := range schema.Attributes {
-		vals[name] = cty.UnknownVal(attrS.Type)
-	}
-	for name, blockS := range schema.BlockTypes {
-		switch blockS.Nesting {
-		case configschema.NestingSingle, configschema.NestingGroup:
-			vals[name] = unknownBlockStub(&blockS.Block)
-		case configschema.NestingList:
-			// In principle we may be expected to produce a tuple value here,
-			// if there are any dynamically-typed attributes in our nested block,
-			// but the legacy SDK doesn't support that, so we just assume it'll
-			// never be necessary to normalize those. (Incorrect usage in any
-			// other SDK would be caught and returned as an error before we
-			// get here.)
-			vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)})
-		case configschema.NestingSet:
-			vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)})
-		case configschema.NestingMap:
-			// A nesting map can never be unknown since we then wouldn't know
-			// what the keys are. (Legacy SDK doesn't support NestingMap anyway,
-			// so this should never arise.)
-			vals[name] = cty.MapValEmpty(blockS.Block.ImpliedType())
-		}
-	}
-	return cty.ObjectVal(vals)
-}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go b/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go
deleted file mode 100644
index 5a8af148..00000000
--- a/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go
+++ /dev/null
@@ -1,390 +0,0 @@
-package objchange
-
-import (
-	"fmt"
-
-	"github.com/zclconf/go-cty/cty"
-
-	"github.com/hashicorp/terraform/configs/configschema"
-)
-
-// ProposedNewObject constructs a proposed new object value by combining the
-// computed attribute values from "prior" with the configured attribute values
-// from "config".
-//
-// Both values must conform to the given schema's implied type, or this function
-// will panic.
-//
-// The prior value must be wholly known, but the config value may be unknown
-// or have nested unknown values.
-//
-// The merging of the two objects includes the attributes of any nested blocks,
-// which will be correlated in a manner appropriate for their nesting mode.
-// Note in particular that the correlation for blocks backed by sets is a
-// heuristic based on matching non-computed attribute values and so it may
-// produce strange results with more "extreme" cases, such as a nested set
-// block where _all_ attributes are computed.
-func ProposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value {
-	// If the config and prior are both null, return early here before
-	// populating the prior block. This prevents non-null blocks from appearing
-	// in the proposed state value.
-	if config.IsNull() && prior.IsNull() {
-		return prior
-	}
-
-	if prior.IsNull() {
-		// In this case, we will construct a synthetic prior value that is
-		// similar to the result of decoding an empty configuration block,
-		// which simplifies our handling of the top-level attributes/blocks
-		// below by giving us one non-null level of object to pull values from.
-		prior = AllAttributesNull(schema)
-	}
-	return proposedNewObject(schema, prior, config)
-}
-
-// PlannedDataResourceObject is similar to ProposedNewObject but tailored for
-// planning data resources in particular. Specifically, it replaces the values
-// of any Computed attributes not set in the configuration with an unknown
-// value, which serves as a placeholder for a value to be filled in by the
-// provider when the data resource is finally read.
-//
-// Data resources are different because the planning of them is handled
-// entirely within Terraform Core and not subject to customization by the
-// provider. This function is, in effect, producing an equivalent result to
-// passing the ProposedNewObject result into a provider's PlanResourceChange
-// function, assuming a fixed implementation of PlanResourceChange that just
-// fills in unknown values as needed.
-func PlannedDataResourceObject(schema *configschema.Block, config cty.Value) cty.Value {
-	// Our trick here is to run the ProposedNewObject logic with an
-	// entirely-unknown prior value. Because of cty's unknown short-circuit
-	// behavior, any operation on prior returns another unknown, and so
-	// unknown values propagate into all of the parts of the resulting value
-	// that would normally be filled in by preserving the prior state.
-	prior := cty.UnknownVal(schema.ImpliedType())
-	return proposedNewObject(schema, prior, config)
-}
-
-func proposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value {
-	if config.IsNull() || !config.IsKnown() {
-		// This is a weird situation, but we'll allow it anyway to free
-		// callers from needing to specifically check for these cases.
-		return prior
-	}
-	if (!prior.Type().IsObjectType()) || (!config.Type().IsObjectType()) {
-		panic("ProposedNewObject only supports object-typed values")
-	}
-
-	// From this point onwards, we can assume that both values are non-null
-	// object types, and that the config value itself is known (though it
-	// may contain nested values that are unknown.)
-
-	newAttrs := map[string]cty.Value{}
-	for name, attr := range schema.Attributes {
-		priorV := prior.GetAttr(name)
-		configV := config.GetAttr(name)
-		var newV cty.Value
-		switch {
-		case attr.Computed && attr.Optional:
-			// This is the trickiest scenario: we want to keep the prior value
-			// if the config isn't overriding it. Note that due to some
-			// ambiguity here, setting an optional+computed attribute from
-			// config and then later switching the config to null in a
-			// subsequent change causes the initial config value to be "sticky"
-			// unless the provider specifically overrides it during its own
-			// plan customization step.
-			if configV.IsNull() {
-				newV = priorV
-			} else {
-				newV = configV
-			}
-		case attr.Computed:
-			// configV will always be null in this case, by definition.
-			// priorV may also be null, but that's okay.
-			newV = priorV
-		default:
-			// For non-computed attributes, we always take the config value,
-			// even if it is null. If it's _required_ then null values
-			// should've been caught during an earlier validation step, and
-			// so we don't really care about that here.
-			newV = configV
-		}
-		newAttrs[name] = newV
-	}
-
-	// Merging nested blocks is a little more complex, since we need to
-	// correlate blocks between both objects and then recursively propose
-	// a new object for each. The correlation logic depends on the nesting
-	// mode for each block type.
-	for name, blockType := range schema.BlockTypes {
-		priorV := prior.GetAttr(name)
-		configV := config.GetAttr(name)
-		var newV cty.Value
-		switch blockType.Nesting {
-
-		case configschema.NestingSingle, configschema.NestingGroup:
-			newV = ProposedNewObject(&blockType.Block, priorV, configV)
-
-		case configschema.NestingList:
-			// Nested blocks are correlated by index.
-			configVLen := 0
-			if configV.IsKnown() && !configV.IsNull() {
-				configVLen = configV.LengthInt()
-			}
-			if configVLen > 0 {
-				newVals := make([]cty.Value, 0, configVLen)
-				for it := configV.ElementIterator(); it.Next(); {
-					idx, configEV := it.Element()
-					if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) {
-						// If there is no corresponding prior element then
-						// we just take the config value as-is.
-						newVals = append(newVals, configEV)
-						continue
-					}
-					priorEV := priorV.Index(idx)
-
-					newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
-					newVals = append(newVals, newEV)
-				}
-				// Despite the name, a NestingList might also be a tuple, if
-				// its nested schema contains dynamically-typed attributes.
-				if configV.Type().IsTupleType() {
-					newV = cty.TupleVal(newVals)
-				} else {
-					newV = cty.ListVal(newVals)
-				}
-			} else {
-				// Despite the name, a NestingList might also be a tuple, if
-				// its nested schema contains dynamically-typed attributes.
-				if configV.Type().IsTupleType() {
-					newV = cty.EmptyTupleVal
-				} else {
-					newV = cty.ListValEmpty(blockType.ImpliedType())
-				}
-			}
-
-		case configschema.NestingMap:
-			// Despite the name, a NestingMap may produce either a map or
-			// object value, depending on whether the nested schema contains
-			// dynamically-typed attributes.
-			if configV.Type().IsObjectType() {
-				// Nested blocks are correlated by key.
-				configVLen := 0
-				if configV.IsKnown() && !configV.IsNull() {
-					configVLen = configV.LengthInt()
-				}
-				if configVLen > 0 {
-					newVals := make(map[string]cty.Value, configVLen)
-					atys := configV.Type().AttributeTypes()
-					for name := range atys {
-						configEV := configV.GetAttr(name)
-						if !priorV.IsKnown() || priorV.IsNull() || !priorV.Type().HasAttribute(name) {
-							// If there is no corresponding prior element then
-							// we just take the config value as-is.
-							newVals[name] = configEV
-							continue
-						}
-						priorEV := priorV.GetAttr(name)
-
-						newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
-						newVals[name] = newEV
-					}
-					// Although we call the nesting mode "map", we actually use
-					// object values so that elements might have different types
-					// in case of dynamically-typed attributes.
-					newV = cty.ObjectVal(newVals)
-				} else {
-					newV = cty.EmptyObjectVal
-				}
-			} else {
-				configVLen := 0
-				if configV.IsKnown() && !configV.IsNull() {
-					configVLen = configV.LengthInt()
-				}
-				if configVLen > 0 {
-					newVals := make(map[string]cty.Value, configVLen)
-					for it := configV.ElementIterator(); it.Next(); {
-						idx, configEV := it.Element()
-						k := idx.AsString()
-						if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) {
-							// If there is no corresponding prior element then
-							// we just take the config value as-is.
-							newVals[k] = configEV
-							continue
-						}
-						priorEV := priorV.Index(idx)
-
-						newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
-						newVals[k] = newEV
-					}
-					newV = cty.MapVal(newVals)
-				} else {
-					newV = cty.MapValEmpty(blockType.ImpliedType())
-				}
-			}
-
-		case configschema.NestingSet:
-			if !configV.Type().IsSetType() {
-				panic("configschema.NestingSet value is not a set as expected")
-			}
-
-			// Nested blocks are correlated by comparing the element values
-			// after eliminating all of the computed attributes. In practice,
-			// this means that any config change produces an entirely new
-			// nested object, and we only propagate prior computed values
-			// if the non-computed attribute values are identical.
-			var cmpVals [][2]cty.Value
-			if priorV.IsKnown() && !priorV.IsNull() {
-				cmpVals = setElementCompareValues(&blockType.Block, priorV, false)
-			}
-			configVLen := 0
-			if configV.IsKnown() && !configV.IsNull() {
-				configVLen = configV.LengthInt()
-			}
-			if configVLen > 0 {
-				used := make([]bool, len(cmpVals)) // track used elements in case multiple have the same compare value
-				newVals := make([]cty.Value, 0, configVLen)
-				for it := configV.ElementIterator(); it.Next(); {
-					_, configEV := it.Element()
-					var priorEV cty.Value
-					for i, cmp := range cmpVals {
-						if used[i] {
-							continue
-						}
-						if cmp[1].RawEquals(configEV) {
-							priorEV = cmp[0]
-							used[i] = true // we can't use this value on a future iteration
-							break
-						}
-					}
-					if priorEV == cty.NilVal {
-						priorEV = cty.NullVal(blockType.ImpliedType())
-					}
-
-					newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
-					newVals = append(newVals, newEV)
-				}
-				newV = cty.SetVal(newVals)
-			} else {
-				newV = cty.SetValEmpty(blockType.Block.ImpliedType())
-			}
-
-		default:
-			// Should never happen, since the above cases are comprehensive.
-			panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting))
-		}
-
-		newAttrs[name] = newV
-	}
-
-	return cty.ObjectVal(newAttrs)
-}
-
-// setElementCompareValues takes a known, non-null value of a cty.Set type and
-// returns a table -- constructed of two-element arrays -- that maps original
-// set element values to corresponding values that have all of the computed
-// values removed, making them suitable for comparison with values obtained
-// from configuration. The element type of the set must conform to the implied
-// type of the given schema, or this function will panic.
-//
-// In the resulting slice, the zeroth element of each array is the original
-// value and the one-indexed element is the corresponding "compare value".
-//
-// This is intended to help correlate prior elements with configured elements
-// in ProposedNewObject. The result is a heuristic rather than an exact science,
-// since e.g. two separate elements may reduce to the same value through this
-// process. The caller must therefore be ready to deal with duplicates.
-func setElementCompareValues(schema *configschema.Block, set cty.Value, isConfig bool) [][2]cty.Value {
-	ret := make([][2]cty.Value, 0, set.LengthInt())
-	for it := set.ElementIterator(); it.Next(); {
-		_, ev := it.Element()
-		ret = append(ret, [2]cty.Value{ev, setElementCompareValue(schema, ev, isConfig)})
-	}
-	return ret
-}
-
-// setElementCompareValue creates a new value that has all of the same
-// non-computed attribute values as the one given but has all computed
-// attribute values forced to null.
-//
-// If isConfig is true then non-null Optional+Computed attribute values will
-// be preserved. Otherwise, they will also be set to null.
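-//
-// For example (illustrative schema, not part of the original file): with a
-// computed attribute "id" and an optional attribute "name", the element
-// {id="i-123", name="web"} compares as {id=null, name="web"}, so prior and
-// config elements are effectively correlated on "name" alone.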
-//
-// The input value must conform to the schema's implied type, and the return
-// value is guaranteed to conform to it.
-func setElementCompareValue(schema *configschema.Block, v cty.Value, isConfig bool) cty.Value {
-    if v.IsNull() || !v.IsKnown() {
-        return v
-    }
-
-    attrs := map[string]cty.Value{}
-    for name, attr := range schema.Attributes {
-        switch {
-        case attr.Computed && attr.Optional:
-            if isConfig {
-                attrs[name] = v.GetAttr(name)
-            } else {
-                attrs[name] = cty.NullVal(attr.Type)
-            }
-        case attr.Computed:
-            attrs[name] = cty.NullVal(attr.Type)
-        default:
-            attrs[name] = v.GetAttr(name)
-        }
-    }
-
-    for name, blockType := range schema.BlockTypes {
-        switch blockType.Nesting {
-
-        case configschema.NestingSingle, configschema.NestingGroup:
-            attrs[name] = setElementCompareValue(&blockType.Block, v.GetAttr(name), isConfig)
-
-        case configschema.NestingList, configschema.NestingSet:
-            cv := v.GetAttr(name)
-            if cv.IsNull() || !cv.IsKnown() {
-                attrs[name] = cv
-                continue
-            }
-            if l := cv.LengthInt(); l > 0 {
-                elems := make([]cty.Value, 0, l)
-                for it := cv.ElementIterator(); it.Next(); {
-                    _, ev := it.Element()
-                    elems = append(elems, setElementCompareValue(&blockType.Block, ev, isConfig))
-                }
-                if blockType.Nesting == configschema.NestingSet {
-                    // SetValEmpty would panic if given elements that are not
-                    // all of the same type, but that's guaranteed not to
-                    // happen here because our input value was _already_ a
-                    // set and we've not changed the types of any elements here.
-                    attrs[name] = cty.SetVal(elems)
-                } else {
-                    attrs[name] = cty.TupleVal(elems)
-                }
-            } else {
-                if blockType.Nesting == configschema.NestingSet {
-                    attrs[name] = cty.SetValEmpty(blockType.Block.ImpliedType())
-                } else {
-                    attrs[name] = cty.EmptyTupleVal
-                }
-            }
-
-        case configschema.NestingMap:
-            cv := v.GetAttr(name)
-            if cv.IsNull() || !cv.IsKnown() {
-                attrs[name] = cv
-                continue
-            }
-            elems := make(map[string]cty.Value)
-            for it := cv.ElementIterator(); it.Next(); {
-                kv, ev := it.Element()
-                elems[kv.AsString()] = setElementCompareValue(&blockType.Block, ev, isConfig)
-            }
-            attrs[name] = cty.ObjectVal(elems)
-
-        default:
-            // Should never happen, since the above cases are comprehensive.
-            panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting))
-        }
-    }
-
-    return cty.ObjectVal(attrs)
-}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go b/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go
deleted file mode 100644
index 69acb897..00000000
--- a/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package objchange
-
-import (
-    "fmt"
-
-    "github.com/zclconf/go-cty/cty"
-
-    "github.com/hashicorp/terraform/configs/configschema"
-)
-
-// AssertPlanValid checks whether a planned new state returned by a
-// provider's PlanResourceChange method is suitable to achieve a change
-// from priorState to config. It returns a slice with nonzero length if
-// any problems are detected. Because problems here indicate bugs in the
-// provider that generated the plannedState, they are written with provider
-// developers as an audience, rather than end-users.
-//
-// All of the given values must have the same type and must conform to the
-// implied type of the given schema, or this function may panic or produce
-// garbage results.
-//
-// During planning, a provider may only make changes to attributes that are
-// null (unset) in the configuration and are marked as "computed" in the
-// resource type schema, in order to insert any default values the provider
-// may know about. If the default value cannot be determined until apply time,
-// the provider can return an unknown value. Providers are forbidden from
-// planning a change that disagrees with any non-null argument in the
-// configuration.
-//
-// As a special exception, providers _are_ allowed to provide attribute values
-// conflicting with configuration if and only if the planned value exactly
-// matches the corresponding attribute value in the prior state. The provider
-// can use this to signal that the new value is functionally equivalent to
-// the old and thus no change is required.
-func AssertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value) []error {
-    return assertPlanValid(schema, priorState, config, plannedState, nil)
-}
-
-func assertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value, path cty.Path) []error {
-    var errs []error
-    if plannedState.IsNull() && !config.IsNull() {
-        errs = append(errs, path.NewErrorf("planned for absence but config wants existence"))
-        return errs
-    }
-    if config.IsNull() && !plannedState.IsNull() {
-        errs = append(errs, path.NewErrorf("planned for existence but config wants absence"))
-        return errs
-    }
-    if plannedState.IsNull() {
-        // No further checks possible if the planned value is null
-        return errs
-    }
-
-    impTy := schema.ImpliedType()
-
-    for name, attrS := range schema.Attributes {
-        plannedV := plannedState.GetAttr(name)
-        configV := config.GetAttr(name)
-        priorV := cty.NullVal(attrS.Type)
-        if !priorState.IsNull() {
-            priorV = priorState.GetAttr(name)
-        }
-
-        path := append(path, cty.GetAttrStep{Name: name})
-        moreErrs := assertPlannedValueValid(attrS, priorV, configV, plannedV, path)
-        errs = append(errs, moreErrs...)
-    }
-    for name, blockS := range schema.BlockTypes {
-        path := append(path, cty.GetAttrStep{Name: name})
-        plannedV := plannedState.GetAttr(name)
-        configV := config.GetAttr(name)
-        priorV := cty.NullVal(impTy.AttributeType(name))
-        if !priorState.IsNull() {
-            priorV = priorState.GetAttr(name)
-        }
-        if plannedV.RawEquals(configV) {
-            // Easy path: nothing has changed at all
-            continue
-        }
-        if !plannedV.IsKnown() {
-            errs = append(errs, path.NewErrorf("attribute representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
-            continue
-        }
-
-        switch blockS.Nesting {
-        case configschema.NestingSingle, configschema.NestingGroup:
-            moreErrs := assertPlanValid(&blockS.Block, priorV, configV, plannedV, path)
-            errs = append(errs, moreErrs...)
-        case configschema.NestingList:
-            // A NestingList might either be a list or a tuple, depending on
-            // whether there are dynamically-typed attributes inside. However,
-            // both support a similar-enough API that we can treat them the
-            // same for our purposes here.
- if plannedV.IsNull() { - errs = append(errs, path.NewErrorf("attribute representing a list of nested blocks must be empty to indicate no blocks, not null")) - continue - } - - plannedL := plannedV.LengthInt() - configL := configV.LengthInt() - if plannedL != configL { - errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL)) - continue - } - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - path := append(path, cty.IndexStep{Key: idx}) - if !plannedEV.IsKnown() { - errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) - continue - } - if !configV.HasIndex(idx).True() { - continue // should never happen since we checked the lengths above - } - configEV := configV.Index(idx) - priorEV := cty.NullVal(blockS.ImpliedType()) - if !priorV.IsNull() && priorV.HasIndex(idx).True() { - priorEV = priorV.Index(idx) - } - - moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) - errs = append(errs, moreErrs...) - } - case configschema.NestingMap: - if plannedV.IsNull() { - errs = append(errs, path.NewErrorf("attribute representing a map of nested blocks must be empty to indicate no blocks, not null")) - continue - } - - // A NestingMap might either be a map or an object, depending on - // whether there are dynamically-typed attributes inside, but - // that's decided statically and so all values will have the same - // kind. - if plannedV.Type().IsObjectType() { - plannedAtys := plannedV.Type().AttributeTypes() - configAtys := configV.Type().AttributeTypes() - for k := range plannedAtys { - if _, ok := configAtys[k]; !ok { - errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k)) - continue - } - path := append(path, cty.GetAttrStep{Name: k}) - - plannedEV := plannedV.GetAttr(k) - if !plannedEV.IsKnown() { - errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) - continue - } - configEV := configV.GetAttr(k) - priorEV := cty.NullVal(blockS.ImpliedType()) - if !priorV.IsNull() && priorV.Type().HasAttribute(k) { - priorEV = priorV.GetAttr(k) - } - moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) - errs = append(errs, moreErrs...) 
-            }
-            for k := range configAtys {
-                if _, ok := plannedAtys[k]; !ok {
-                    errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", k))
-                    continue
-                }
-            }
-        } else {
-            plannedL := plannedV.LengthInt()
-            configL := configV.LengthInt()
-            if plannedL != configL {
-                errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL))
-                continue
-            }
-            for it := plannedV.ElementIterator(); it.Next(); {
-                idx, plannedEV := it.Element()
-                path := append(path, cty.IndexStep{Key: idx})
-                if !plannedEV.IsKnown() {
-                    errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
-                    continue
-                }
-                k := idx.AsString()
-                if !configV.HasIndex(idx).True() {
-                    errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k))
-                    continue
-                }
-                configEV := configV.Index(idx)
-                priorEV := cty.NullVal(blockS.ImpliedType())
-                if !priorV.IsNull() && priorV.HasIndex(idx).True() {
-                    priorEV = priorV.Index(idx)
-                }
-                moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path)
-                errs = append(errs, moreErrs...)
-            }
-            for it := configV.ElementIterator(); it.Next(); {
-                idx, _ := it.Element()
-                if !plannedV.HasIndex(idx).True() {
-                    errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", idx.AsString()))
-                    continue
-                }
-            }
-        }
-        case configschema.NestingSet:
-            if plannedV.IsNull() {
-                errs = append(errs, path.NewErrorf("attribute representing a set of nested blocks must be empty to indicate no blocks, not null"))
-                continue
-            }
-
-            // Because set elements have no identifier with which to correlate
-            // them, we can't robustly validate the plan for a nested block
-            // backed by a set, and so unfortunately we need to just trust the
-            // provider to do the right thing. :(
-            //
-            // (In principle we could correlate elements by matching the
-            // subset of attributes explicitly set in config, except for the
-            // special diff suppression rule which allows for there to be a
-            // planned value that is constructed by mixing part of a prior
-            // value with part of a config value, creating an entirely new
-            // element that is present in neither the prior state nor the config.)
-            for it := plannedV.ElementIterator(); it.Next(); {
-                idx, plannedEV := it.Element()
-                path := append(path, cty.IndexStep{Key: idx})
-                if !plannedEV.IsKnown() {
-                    errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
-                    continue
-                }
-            }
-
-        default:
-            panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting))
-        }
-    }
-
-    return errs
-}
-
-func assertPlannedValueValid(attrS *configschema.Attribute, priorV, configV, plannedV cty.Value, path cty.Path) []error {
-    var errs []error
-    if plannedV.RawEquals(configV) {
-        // This is the easy path: provider didn't change anything at all.
-        return errs
-    }
-    if plannedV.RawEquals(priorV) && !priorV.IsNull() {
-        // Also pretty easy: there is a prior value and the provider has
-        // returned it unchanged. This indicates that configV and plannedV
-        // are functionally equivalent and so the provider wishes to disregard
-        // the configuration value in favor of the prior.
-        return errs
-    }
-    if attrS.Computed && configV.IsNull() {
-        // The provider is allowed to change the value of any computed
-        // attribute that isn't explicitly set in the config.
- return errs - } - - // If none of the above conditions match, the provider has made an invalid - // change to this attribute. - if priorV.IsNull() { - if attrS.Sensitive { - errs = append(errs, path.NewErrorf("sensitive planned value does not match config value")) - } else { - errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v", plannedV, configV)) - } - return errs - } - if attrS.Sensitive { - errs = append(errs, path.NewErrorf("sensitive planned value does not match config value nor prior value")) - } else { - errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v nor prior value %#v", plannedV, configV, priorV)) - } - return errs -} diff --git a/vendor/github.com/hashicorp/terraform/plans/plan.go b/vendor/github.com/hashicorp/terraform/plans/plan.go deleted file mode 100644 index 5a3e4548..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/plan.go +++ /dev/null @@ -1,92 +0,0 @@ -package plans - -import ( - "sort" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// Plan is the top-level type representing a planned set of changes. -// -// A plan is a summary of the set of changes required to move from a current -// state to a goal state derived from configuration. The described changes -// are not applied directly, but contain an approximation of the final -// result that will be completed during apply by resolving any values that -// cannot be predicted. -// -// A plan must always be accompanied by the state and configuration it was -// built from, since the plan does not itself include all of the information -// required to make the changes indicated. -type Plan struct { - VariableValues map[string]DynamicValue - Changes *Changes - TargetAddrs []addrs.Targetable - ProviderSHA256s map[string][]byte - Backend Backend -} - -// Backend represents the backend-related configuration and other data as it -// existed when a plan was created. -type Backend struct { - // Type is the type of backend that the plan will apply against. - Type string - - // Config is the configuration of the backend, whose schema is decided by - // the backend Type. - Config DynamicValue - - // Workspace is the name of the workspace that was active when the plan - // was created. It is illegal to apply a plan created for one workspace - // to the state of another workspace. - // (This constraint is already enforced by the statefile lineage mechanism, - // but storing this explicitly allows us to return a better error message - // in the situation where the user has the wrong workspace selected.) - Workspace string -} - -func NewBackend(typeName string, config cty.Value, configSchema *configschema.Block, workspaceName string) (*Backend, error) { - dv, err := NewDynamicValue(config, configSchema.ImpliedType()) - if err != nil { - return nil, err - } - - return &Backend{ - Type: typeName, - Config: dv, - Workspace: workspaceName, - }, nil -} - -// ProviderAddrs returns a list of all of the provider configuration addresses -// referenced throughout the receiving plan. -// -// The result is de-duplicated so that each distinct address appears only once. 
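(To make the AssertPlanValid contract from plan_valid.go above concrete: a minimal sketch, assuming the vendored packages are importable at the paths shown in this diff; the schema and values are invented.)

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/configs/configschema"
    "github.com/hashicorp/terraform/plans/objchange"
    "github.com/zclconf/go-cty/cty"
)

func main() {
    schema := &configschema.Block{
        Attributes: map[string]*configschema.Attribute{
            "name": {Type: cty.String, Optional: true},
            "id":   {Type: cty.String, Computed: true},
        },
    }
    prior := cty.NullVal(schema.ImpliedType()) // resource does not exist yet
    config := cty.ObjectVal(map[string]cty.Value{
        "name": cty.StringVal("a"),
        "id":   cty.NullVal(cty.String),
    })

    // Legal plan: the provider marks the computed, config-null "id" as unknown.
    ok := cty.ObjectVal(map[string]cty.Value{
        "name": cty.StringVal("a"),
        "id":   cty.UnknownVal(cty.String),
    })
    fmt.Println(len(objchange.AssertPlanValid(schema, prior, config, ok))) // 0

    // Illegal plan: the provider contradicts the non-null "name" in config.
    bad := cty.ObjectVal(map[string]cty.Value{
        "name": cty.StringVal("b"),
        "id":   cty.UnknownVal(cty.String),
    })
    for _, err := range objchange.AssertPlanValid(schema, prior, config, bad) {
        fmt.Println(err) // planned value ... does not match config value ...
    }
}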
-func (p *Plan) ProviderAddrs() []addrs.AbsProviderConfig {
-    if p == nil || p.Changes == nil {
-        return nil
-    }
-
-    m := map[string]addrs.AbsProviderConfig{}
-    for _, rc := range p.Changes.Resources {
-        m[rc.ProviderAddr.String()] = rc.ProviderAddr
-    }
-    if len(m) == 0 {
-        return nil
-    }
-
-    // This is mainly just so we'll get stable results for testing purposes.
-    keys := make([]string, 0, len(m))
-    for k := range m {
-        keys = append(keys, k)
-    }
-    sort.Strings(keys)
-
-    ret := make([]addrs.AbsProviderConfig, len(keys))
-    for i, key := range keys {
-        ret[i] = m[key]
-    }
-
-    return ret
-}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go
deleted file mode 100644
index f053312b..00000000
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package discovery
-
-import (
-    "io/ioutil"
-    "log"
-    "os"
-    "path/filepath"
-    "strings"
-)
-
-// FindPlugins looks in the given directories for files whose filenames
-// suggest that they are plugins of the given kind (e.g. "provider") and
-// returns a PluginMetaSet representing the discovered potential-plugins.
-//
-// Currently this supports two different naming schemes. The current
-// standard naming scheme is a subdirectory called $GOOS-$GOARCH containing
-// files named terraform-$KIND-$NAME-V$VERSION. The legacy naming scheme is
-// files directly in the given directory whose names are like
-// terraform-$KIND-$NAME.
-//
-// Only one plugin will be returned for each unique plugin (name, version)
-// pair, with preference given to files found in earlier directories.
-//
-// This is a convenience wrapper around FindPluginPaths and ResolvePluginPaths.
-func FindPlugins(kind string, dirs []string) PluginMetaSet {
-    return ResolvePluginPaths(FindPluginPaths(kind, dirs))
-}
-
-// FindPluginPaths looks in the given directories for files whose filenames
-// suggest that they are plugins of the given kind (e.g. "provider").
-//
-// The return value is a list of absolute paths that appear to refer to
-// plugins in the given directories, based only on what can be inferred
-// from the naming scheme. The paths returned are ordered such that files
-// in later dirs appear after files in earlier dirs in the given directory
-// list. Within the same directory plugins are returned in a consistent but
-// undefined order.
-func FindPluginPaths(kind string, dirs []string) []string {
-    // This is just a thin wrapper around findPluginPaths so that we can
-    // use the latter in tests with a fake machineName so we can use our
-    // test fixtures.
- return findPluginPaths(kind, dirs) -} - -func findPluginPaths(kind string, dirs []string) []string { - prefix := "terraform-" + kind + "-" - - ret := make([]string, 0, len(dirs)) - - for _, dir := range dirs { - items, err := ioutil.ReadDir(dir) - if err != nil { - // Ignore missing dirs, non-dirs, etc - continue - } - - log.Printf("[DEBUG] checking for %s in %q", kind, dir) - - for _, item := range items { - fullName := item.Name() - - if !strings.HasPrefix(fullName, prefix) { - continue - } - - // New-style paths must have a version segment in filename - if strings.Contains(strings.ToLower(fullName), "_v") { - absPath, err := filepath.Abs(filepath.Join(dir, fullName)) - if err != nil { - log.Printf("[ERROR] plugin filepath error: %s", err) - continue - } - - // Check that the file we found is usable - if !pathIsFile(absPath) { - log.Printf("[ERROR] ignoring non-file %s", absPath) - continue - } - - log.Printf("[DEBUG] found %s %q", kind, fullName) - ret = append(ret, filepath.Clean(absPath)) - continue - } - - // Legacy style with files directly in the base directory - absPath, err := filepath.Abs(filepath.Join(dir, fullName)) - if err != nil { - log.Printf("[ERROR] plugin filepath error: %s", err) - continue - } - - // Check that the file we found is usable - if !pathIsFile(absPath) { - log.Printf("[ERROR] ignoring non-file %s", absPath) - continue - } - - log.Printf("[WARN] found legacy %s %q", kind, fullName) - - ret = append(ret, filepath.Clean(absPath)) - } - } - - return ret -} - -// Returns true if and only if the given path refers to a file or a symlink -// to a file. -func pathIsFile(path string) bool { - info, err := os.Stat(path) - if err != nil { - return false - } - - return !info.IsDir() -} - -// ResolvePluginPaths takes a list of paths to plugin executables (as returned -// by e.g. FindPluginPaths) and produces a PluginMetaSet describing the -// referenced plugins. -// -// If the same combination of plugin name and version appears multiple times, -// the earlier reference will be preferred. Several different versions of -// the same plugin name may be returned, in which case the methods of -// PluginMetaSet can be used to filter down. -func ResolvePluginPaths(paths []string) PluginMetaSet { - s := make(PluginMetaSet) - - type nameVersion struct { - Name string - Version string - } - found := make(map[nameVersion]struct{}) - - for _, path := range paths { - baseName := strings.ToLower(filepath.Base(path)) - if !strings.HasPrefix(baseName, "terraform-") { - // Should never happen with reasonable input - continue - } - - baseName = baseName[10:] - firstDash := strings.Index(baseName, "-") - if firstDash == -1 { - // Should never happen with reasonable input - continue - } - - baseName = baseName[firstDash+1:] - if baseName == "" { - // Should never happen with reasonable input - continue - } - - // Trim the .exe suffix used on Windows before we start wrangling - // the remainder of the path. - if strings.HasSuffix(baseName, ".exe") { - baseName = baseName[:len(baseName)-4] - } - - parts := strings.SplitN(baseName, "_v", 2) - name := parts[0] - version := VersionZero - if len(parts) == 2 { - version = parts[1] - } - - // Auto-installed plugins contain an extra name portion representing - // the expected plugin version, which we must trim off. 
- if underX := strings.Index(version, "_x"); underX != -1 { - version = version[:underX] - } - - if _, ok := found[nameVersion{name, version}]; ok { - // Skip duplicate versions of the same plugin - // (We do this during this step because after this we will be - // dealing with sets and thus lose our ordering with which to - // decide preference.) - continue - } - - s.Add(PluginMeta{ - Name: name, - Version: VersionStr(version), - Path: path, - }) - found[nameVersion{name, version}] = struct{}{} - } - - return s -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go deleted file mode 100644 index 1a100426..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go +++ /dev/null @@ -1,48 +0,0 @@ -package discovery - -// PluginCache is an interface implemented by objects that are able to maintain -// a cache of plugins. -type PluginCache interface { - // CachedPluginPath returns a path where the requested plugin is already - // cached, or an empty string if the requested plugin is not yet cached. - CachedPluginPath(kind string, name string, version Version) string - - // InstallDir returns the directory that new plugins should be installed into - // in order to populate the cache. This directory should be used as the - // first argument to getter.Get when downloading plugins with go-getter. - // - // After installing into this directory, use CachedPluginPath to obtain the - // path where the plugin was installed. - InstallDir() string -} - -// NewLocalPluginCache returns a PluginCache that caches plugins in a -// given local directory. -func NewLocalPluginCache(dir string) PluginCache { - return &pluginCache{ - Dir: dir, - } -} - -type pluginCache struct { - Dir string -} - -func (c *pluginCache) CachedPluginPath(kind string, name string, version Version) string { - allPlugins := FindPlugins(kind, []string{c.Dir}) - plugins := allPlugins.WithName(name).WithVersion(version) - - if plugins.Count() == 0 { - // nothing cached - return "" - } - - // There should generally be only one plugin here; if there's more than - // one match for some reason then we'll just choose one arbitrarily. - plugin := plugins.Newest() - return plugin.Path -} - -func (c *pluginCache) InstallDir() string { - return c.Dir -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go deleted file mode 100644 index bdcebcb9..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go +++ /dev/null @@ -1,41 +0,0 @@ -package discovery - -import ( - "crypto/sha256" - "io" - "os" -) - -// PluginMeta is metadata about a plugin, useful for launching the plugin -// and for understanding which plugins are available. -type PluginMeta struct { - // Name is the name of the plugin, e.g. as inferred from the plugin - // binary's filename, or by explicit configuration. - Name string - - // Version is the semver version of the plugin, expressed as a string - // that might not be semver-valid. - Version VersionStr - - // Path is the absolute path of the executable that can be launched - // to provide the RPC server for this plugin. - Path string -} - -// SHA256 returns a SHA256 hash of the content of the referenced executable -// file, or an error if the file's contents cannot be read. 
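(Rounding out get_cache.go above, a short usage sketch of the PluginCache interface; the cache directory, plugin name, and version are placeholders, and the vendored discovery package is assumed importable.)

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/plugin/discovery"
)

func main() {
    cache := discovery.NewLocalPluginCache("/tmp/plugin-cache") // hypothetical directory

    // New plugins are downloaded into InstallDir()...
    fmt.Println("install into:", cache.InstallDir())

    // ...and looked up afterwards by kind, name, and version.
    v := discovery.VersionStr("1.2.0").MustParse()
    if path := cache.CachedPluginPath("provider", "null", v); path != "" {
        fmt.Println("cached at:", path)
    } else {
        fmt.Println("not cached yet")
    }
}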
-func (m PluginMeta) SHA256() ([]byte, error) { - f, err := os.Open(m.Path) - if err != nil { - return nil, err - } - defer f.Close() - - h := sha256.New() - _, err = io.Copy(h, f) - if err != nil { - return nil, err - } - - return h.Sum(nil), nil -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go deleted file mode 100644 index 3a992892..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go +++ /dev/null @@ -1,195 +0,0 @@ -package discovery - -// A PluginMetaSet is a set of PluginMeta objects meeting a certain criteria. -// -// Methods on this type allow filtering of the set to produce subsets that -// meet more restrictive criteria. -type PluginMetaSet map[PluginMeta]struct{} - -// Add inserts the given PluginMeta into the receiving set. This is a no-op -// if the given meta is already present. -func (s PluginMetaSet) Add(p PluginMeta) { - s[p] = struct{}{} -} - -// Remove removes the given PluginMeta from the receiving set. This is a no-op -// if the given meta is not already present. -func (s PluginMetaSet) Remove(p PluginMeta) { - delete(s, p) -} - -// Has returns true if the given meta is in the receiving set, or false -// otherwise. -func (s PluginMetaSet) Has(p PluginMeta) bool { - _, ok := s[p] - return ok -} - -// Count returns the number of metas in the set -func (s PluginMetaSet) Count() int { - return len(s) -} - -// ValidateVersions returns two new PluginMetaSets, separating those with -// versions that have syntax-valid semver versions from those that don't. -// -// Eliminating invalid versions from consideration (and possibly warning about -// them) is usually the first step of working with a meta set after discovery -// has completed. -func (s PluginMetaSet) ValidateVersions() (valid, invalid PluginMetaSet) { - valid = make(PluginMetaSet) - invalid = make(PluginMetaSet) - for p := range s { - if _, err := p.Version.Parse(); err == nil { - valid.Add(p) - } else { - invalid.Add(p) - } - } - return -} - -// WithName returns the subset of metas that have the given name. -func (s PluginMetaSet) WithName(name string) PluginMetaSet { - ns := make(PluginMetaSet) - for p := range s { - if p.Name == name { - ns.Add(p) - } - } - return ns -} - -// WithVersion returns the subset of metas that have the given version. -// -// This should be used only with the "valid" result from ValidateVersions; -// it will ignore any plugin metas that have invalid version strings. -func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet { - ns := make(PluginMetaSet) - for p := range s { - gotVersion, err := p.Version.Parse() - if err != nil { - continue - } - if gotVersion.Equal(version) { - ns.Add(p) - } - } - return ns -} - -// ByName groups the metas in the set by their Names, returning a map. -func (s PluginMetaSet) ByName() map[string]PluginMetaSet { - ret := make(map[string]PluginMetaSet) - for p := range s { - if _, ok := ret[p.Name]; !ok { - ret[p.Name] = make(PluginMetaSet) - } - ret[p.Name].Add(p) - } - return ret -} - -// Newest returns the one item from the set that has the newest Version value. -// -// The result is meaningful only if the set is already filtered such that -// all of the metas have the same Name. -// -// If there isn't at least one meta in the set then this function will panic. -// Use Count() to ensure that there is at least one value before calling. 
-//
-// If any of the metas have invalid version strings then this function will
-// panic. Use ValidateVersions() first to filter out metas with invalid
-// versions.
-//
-// If two metas have the same Version then one is arbitrarily chosen. This
-// situation should be avoided by pre-filtering the set.
-func (s PluginMetaSet) Newest() PluginMeta {
-    if len(s) == 0 {
-        panic("can't call Newest on empty PluginMetaSet")
-    }
-
-    var first = true
-    var winner PluginMeta
-    var winnerVersion Version
-    for p := range s {
-        version, err := p.Version.Parse()
-        if err != nil {
-            panic(err)
-        }
-
-        if first || version.NewerThan(winnerVersion) {
-            winner = p
-            winnerVersion = version
-            first = false
-        }
-    }
-
-    return winner
-}
-
-// ConstrainVersions takes a set of requirements and attempts to
-// return a map from name to a set of metas that have the matching
-// name and an appropriate version.
-//
-// If any of the given requirements match *no* plugins then its PluginMetaSet
-// in the returned map will be empty.
-//
-// All viable metas are returned, so the caller can apply any desired filtering
-// to reduce down to a single option. For example, calling Newest() to obtain
-// the highest available version.
-//
-// If any of the metas in the set have invalid version strings then this
-// function will panic. Use ValidateVersions() first to filter out metas with
-// invalid versions.
-func (s PluginMetaSet) ConstrainVersions(reqd PluginRequirements) map[string]PluginMetaSet {
-    ret := make(map[string]PluginMetaSet)
-    for p := range s {
-        name := p.Name
-        allowedVersions, ok := reqd[name]
-        if !ok {
-            continue
-        }
-        if _, ok := ret[p.Name]; !ok {
-            ret[p.Name] = make(PluginMetaSet)
-        }
-        version, err := p.Version.Parse()
-        if err != nil {
-            panic(err)
-        }
-        if allowedVersions.Allows(version) {
-            ret[p.Name].Add(p)
-        }
-    }
-    return ret
-}
-
-// OverridePaths returns a new set where any existing plugins with the given
-// names are removed and replaced with the single path given in the map.
-//
-// This is here only to continue to support the legacy way of overriding
-// plugin binaries in the .terraformrc file. It treats all given plugins
-// as pre-versioning (version 0.0.0). This mechanism will eventually be
-// phased out, with vendor directories being the intended replacement.
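(The helpers above are typically chained. A minimal sketch of the usual filtering pipeline, with invented plugin names and paths:)

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/plugin/discovery"
)

func main() {
    s := make(discovery.PluginMetaSet)
    s.Add(discovery.PluginMeta{Name: "null", Version: "1.0.0", Path: "/p/terraform-provider-null_v1.0.0"})
    s.Add(discovery.PluginMeta{Name: "null", Version: "1.2.0", Path: "/p/terraform-provider-null_v1.2.0"})
    s.Add(discovery.PluginMeta{Name: "null", Version: "bogus", Path: "/p/terraform-provider-null_bogus"})

    // Drop syntactically invalid versions first; Newest panics on them.
    valid, _ := s.ValidateVersions()
    newest := valid.WithName("null").Newest()
    fmt.Println(newest.Version) // 1.2.0
}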
-func (s PluginMetaSet) OverridePaths(paths map[string]string) PluginMetaSet {
-    ret := make(PluginMetaSet)
-    for p := range s {
-        if _, ok := paths[p.Name]; ok {
-            // Skip plugins that we're overriding
-            continue
-        }
-
-        ret.Add(p)
-    }
-
-    // Now add the metadata for overriding plugins
-    for name, path := range paths {
-        ret.Add(PluginMeta{
-            Name:    name,
-            Version: VersionZero,
-            Path:    path,
-        })
-    }
-
-    return ret
-}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
deleted file mode 100644
index 0466ab25..00000000
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package discovery
-
-import (
-    "bytes"
-)
-
-// PluginInstallProtocolVersion is the protocol version TF-core
-// supports to communicate with servers, and is used to resolve
-// plugin discovery with the terraform registry, in addition to
-// any specified plugin version constraints.
-const PluginInstallProtocolVersion = 5
-
-// PluginRequirements describes a set of plugins (assumed to be of a consistent
-// kind) that are required to exist and have versions within the given
-// corresponding sets.
-type PluginRequirements map[string]*PluginConstraints
-
-// PluginConstraints represents an element of PluginRequirements describing
-// the constraints for a single plugin.
-type PluginConstraints struct {
-    // Specifies that the plugin's version must be within the given
-    // constraints.
-    Versions Constraints
-
-    // If non-nil, the hash of the on-disk plugin executable must exactly
-    // match the SHA256 hash given here.
-    SHA256 []byte
-}
-
-// Allows returns true if the given version is within the receiver's version
-// constraints.
-func (s *PluginConstraints) Allows(v Version) bool {
-    return s.Versions.Allows(v)
-}
-
-// AcceptsSHA256 returns true if the given executable SHA256 hash is acceptable,
-// either because it matches the constraint or because there is no such
-// constraint.
-func (s *PluginConstraints) AcceptsSHA256(digest []byte) bool {
-    if s.SHA256 == nil {
-        return true
-    }
-    return bytes.Equal(s.SHA256, digest)
-}
-
-// Merge takes the contents of the receiver and the other given requirements
-// object and merges them together into a single requirements structure
-// that satisfies both sets of requirements.
-//
-// Note that it doesn't make sense to merge two PluginRequirements with
-// differing required plugin SHA256 hashes, since the result will never
-// match any plugin.
-func (r PluginRequirements) Merge(other PluginRequirements) PluginRequirements {
-    ret := make(PluginRequirements)
-    for n, c := range r {
-        ret[n] = &PluginConstraints{
-            Versions: Constraints{}.Append(c.Versions),
-            SHA256:   c.SHA256,
-        }
-    }
-    for n, c := range other {
-        if existing, exists := ret[n]; exists {
-            ret[n].Versions = ret[n].Versions.Append(c.Versions)
-
-            if existing.SHA256 != nil {
-                if c.SHA256 != nil && !bytes.Equal(c.SHA256, existing.SHA256) {
-                    // If we've been asked to merge two constraints with
-                    // different SHA256 hashes then we'll produce a dummy value
-                    // that can never match anything. This is a silly edge case
-                    // that no reasonable caller should hit.
-                    ret[n].SHA256 = []byte(invalidProviderHash)
-                }
-            } else {
-                ret[n].SHA256 = c.SHA256 // might still be nil
-            }
-        } else {
-            ret[n] = &PluginConstraints{
-                Versions: Constraints{}.Append(c.Versions),
-                SHA256:   c.SHA256,
-            }
-        }
-    }
-    return ret
-}
-
-// LockExecutables applies additional constraints to the receiver that
-// require plugin executables with specific SHA256 digests. This modifies
-// the receiver in-place, since it's intended to be applied after
-// version constraints have been resolved.
-//
-// The given map must include a key for every plugin that is already
-// required. If not, any missing keys will cause the corresponding plugin
-// to never match, though the direct caller doesn't necessarily need to
-// guarantee this as long as the downstream code _applying_ these constraints
-// is able to deal with the non-match in some way.
-func (r PluginRequirements) LockExecutables(sha256s map[string][]byte) {
-    for name, cons := range r {
-        digest := sha256s[name]
-
-        if digest == nil {
-            // Prevent any match, which will then presumably cause the
-            // downstream consumer of these requirements to report an error.
-            cons.SHA256 = []byte(invalidProviderHash)
-            continue
-        }
-
-        cons.SHA256 = digest
-    }
-}
-
-const invalidProviderHash = ""
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go
deleted file mode 100644
index 4311d510..00000000
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package discovery
-
-import (
-    "fmt"
-    "sort"
-
-    version "github.com/hashicorp/go-version"
-)
-
-const VersionZero = "0.0.0"
-
-// A VersionStr is a string containing a possibly-invalid representation
-// of a semver version number. Call Parse on it to obtain a real Version
-// object, or discover that it is invalid.
-type VersionStr string
-
-// Parse transforms a VersionStr into a Version if it is
-// syntactically valid. If it isn't then an error is returned instead.
-func (s VersionStr) Parse() (Version, error) {
-    raw, err := version.NewVersion(string(s))
-    if err != nil {
-        return Version{}, err
-    }
-    return Version{raw}, nil
-}
-
-// MustParse transforms a VersionStr into a Version if it is
-// syntactically valid. If it isn't then it panics.
-func (s VersionStr) MustParse() Version {
-    ret, err := s.Parse()
-    if err != nil {
-        panic(err)
-    }
-    return ret
-}
-
-// Version represents a version number that has been parsed from
-// a semver string and known to be valid.
-type Version struct {
-    // We wrap this here just because it avoids a proliferation of
-    // direct go-version imports all over the place, and keeps the
-    // version-processing details within this package.
-    raw *version.Version
-}
-
-func (v Version) String() string {
-    return v.raw.String()
-}
-
-func (v Version) NewerThan(other Version) bool {
-    return v.raw.GreaterThan(other.raw)
-}
-
-func (v Version) Equal(other Version) bool {
-    return v.raw.Equal(other.raw)
-}
-
-// IsPrerelease reports whether the version is a prerelease.
-func (v Version) IsPrerelease() bool {
-    return v.raw.Prerelease() != ""
-}
-
-// MinorUpgradeConstraintStr returns a ConstraintStr that would permit
-// minor upgrades relative to the receiving version.
-func (v Version) MinorUpgradeConstraintStr() ConstraintStr {
-    segments := v.raw.Segments()
-    return ConstraintStr(fmt.Sprintf("~> %d.%d", segments[0], segments[1]))
-}
-
-type Versions []Version
-
-// Sort sorts versions from newest to oldest.
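(A quick sketch of VersionStr parsing and Versions.Sort, declared just below; the version strings are invented. Note that the ordering is semantic, not lexical: 1.10.0 sorts above 1.2.0.)

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/plugin/discovery"
)

func main() {
    vs := discovery.Versions{
        discovery.VersionStr("1.2.0").MustParse(),
        discovery.VersionStr("2.0.0-beta1").MustParse(),
        discovery.VersionStr("1.10.0").MustParse(),
    }
    vs.Sort() // newest first: 2.0.0-beta1, 1.10.0, 1.2.0
    for _, v := range vs {
        fmt.Println(v, "prerelease:", v.IsPrerelease())
    }
}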
-func (v Versions) Sort() {
-    sort.Slice(v, func(i, j int) bool {
-        return v[i].NewerThan(v[j])
-    })
-}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go
deleted file mode 100644
index de02f5ec..00000000
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package discovery
-
-import (
-    "sort"
-
-    version "github.com/hashicorp/go-version"
-)
-
-// A ConstraintStr is a string containing a possibly-invalid representation
-// of a version constraint provided in configuration. Call Parse on it to
-// obtain a real Constraint object, or discover that it is invalid.
-type ConstraintStr string
-
-// Parse transforms a ConstraintStr into a Constraints if it is
-// syntactically valid. If it isn't then an error is returned instead.
-func (s ConstraintStr) Parse() (Constraints, error) {
-    raw, err := version.NewConstraint(string(s))
-    if err != nil {
-        return Constraints{}, err
-    }
-    return Constraints{raw}, nil
-}
-
-// MustParse is like Parse but it panics if the constraint string is invalid.
-func (s ConstraintStr) MustParse() Constraints {
-    ret, err := s.Parse()
-    if err != nil {
-        panic(err)
-    }
-    return ret
-}
-
-// Constraints represents a set of versions which any given Version is either
-// a member of or not.
-type Constraints struct {
-    raw version.Constraints
-}
-
-// NewConstraints creates a Constraints based on a version.Constraints.
-func NewConstraints(c version.Constraints) Constraints {
-    return Constraints{c}
-}
-
-// AllVersions is a Constraints containing all versions.
-var AllVersions Constraints
-
-func init() {
-    AllVersions = Constraints{
-        raw: make(version.Constraints, 0),
-    }
-}
-
-// Allows returns true if the given version is permitted by the receiving
-// constraints set.
-func (s Constraints) Allows(v Version) bool {
-    return s.raw.Check(v.raw)
-}
-
-// Append combines the receiving set with the given other set to produce
-// a set that is the intersection of both sets, which is to say that the
-// resulting constraints contain only the versions that are members of both.
-func (s Constraints) Append(other Constraints) Constraints {
-    raw := make(version.Constraints, 0, len(s.raw)+len(other.raw))
-
-    // Since "raw" is a list of constraints that remove versions from the set,
-    // "Intersection" is implemented by concatenating together those lists,
-    // thus leaving behind only the versions not removed by either list.
-    raw = append(raw, s.raw...)
-    raw = append(raw, other.raw...)
-
-    // while the set is unordered, we sort these lexically for consistent output
-    sort.Slice(raw, func(i, j int) bool {
-        return raw[i].String() < raw[j].String()
-    })
-
-    return Constraints{raw}
-}
-
-// String returns a string representation of the set members as a set
-// of range constraints.
-func (s Constraints) String() string {
-    return s.raw.String()
-}
-
-// Unconstrained returns true if and only if the receiver is an empty
-// constraint set.
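(To see how Append behaves as an intersection, a minimal sketch with invented constraint strings; MustParse panics on invalid input, which suits a short example like this.)

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/plugin/discovery"
)

func main() {
    a := discovery.ConstraintStr(">= 1.0").MustParse()
    b := discovery.ConstraintStr("< 2.0").MustParse()

    // Append is an intersection: only versions allowed by both sets remain.
    both := a.Append(b)
    fmt.Println(both) // e.g. "< 2.0, >= 1.0" after the lexical sort

    v := discovery.VersionStr("1.5.0").MustParse()
    fmt.Println(both.Allows(v))                        // true
    fmt.Println(discovery.AllVersions.Unconstrained()) // true
}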
-func (s Constraints) Unconstrained() bool { - return len(s.raw) == 0 -} diff --git a/vendor/github.com/hashicorp/terraform/providers/addressed_types.go b/vendor/github.com/hashicorp/terraform/providers/addressed_types.go deleted file mode 100644 index 85ff4c96..00000000 --- a/vendor/github.com/hashicorp/terraform/providers/addressed_types.go +++ /dev/null @@ -1,33 +0,0 @@ -package providers - -import ( - "sort" - - "github.com/hashicorp/terraform/addrs" -) - -// AddressedTypesAbs is a helper that extracts all of the distinct provider -// types from the given list of absolute provider configuration addresses. -func AddressedTypesAbs(providerAddrs []addrs.AbsProviderConfig) []addrs.Provider { - if len(providerAddrs) == 0 { - return nil - } - m := map[string]addrs.Provider{} - for _, addr := range providerAddrs { - m[addr.Provider.String()] = addr.Provider - } - - names := make([]string, 0, len(m)) - for typeName := range m { - names = append(names, typeName) - } - - sort.Strings(names) // Stable result for tests - - ret := make([]addrs.Provider, len(names)) - for i, name := range names { - ret[i] = m[name] - } - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/providers/doc.go b/vendor/github.com/hashicorp/terraform/providers/doc.go deleted file mode 100644 index 39aa1de6..00000000 --- a/vendor/github.com/hashicorp/terraform/providers/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package providers contains the interface and primary types required to -// implement a Terraform resource provider. -package providers diff --git a/vendor/github.com/hashicorp/terraform/providers/factory.go b/vendor/github.com/hashicorp/terraform/providers/factory.go deleted file mode 100644 index 1586ca34..00000000 --- a/vendor/github.com/hashicorp/terraform/providers/factory.go +++ /dev/null @@ -1,63 +0,0 @@ -package providers - -// Factory is a function type that creates a new instance of a resource -// provider, or returns an error if that is impossible. -type Factory func() (Interface, error) - -// FactoryFixed is a helper that creates a Factory that just returns some given -// single provider. -// -// Unlike usual factories, the exact same instance is returned for each call -// to the factory and so this must be used in only specialized situations where -// the caller can take care to either not mutate the given provider at all -// or to mutate it in ways that will not cause unexpected behavior for others -// holding the same reference. -func FactoryFixed(p Interface) Factory { - return func() (Interface, error) { - return p, nil - } -} - -// ProviderHasResource is a helper that requests schema from the given provider -// and checks if it has a resource type of the given name. -// -// This function is more expensive than it may first appear since it must -// retrieve the entire schema from the underlying provider, and so it should -// be used sparingly and especially not in tight loops. -// -// Since retrieving the provider may fail (e.g. if the provider is accessed -// over an RPC channel that has operational problems), this function will -// return false if the schema cannot be retrieved, under the assumption that -// a subsequent call to do anything with the resource type would fail -// anyway. 
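(FactoryFixed, defined above, hands back the same instance on every call. A tiny sketch; the provider value is left nil purely for brevity, a real caller would pass an already-configured provider.)

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/providers"
)

func main() {
    var p providers.Interface // nil stand-in for an existing provider instance

    factories := map[string]providers.Factory{
        "null": providers.FactoryFixed(p),
    }

    got, err := factories["null"]()
    fmt.Println(got == p, err) // true <nil>: the exact same instance every call
}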
-func ProviderHasResource(provider Interface, typeName string) bool {
-    resp := provider.GetSchema()
-    if resp.Diagnostics.HasErrors() {
-        return false
-    }
-
-    _, exists := resp.ResourceTypes[typeName]
-    return exists
-}
-
-// ProviderHasDataSource is a helper that requests schema from the given
-// provider and checks if it has a data source of the given name.
-//
-// This function is more expensive than it may first appear since it must
-// retrieve the entire schema from the underlying provider, and so it should
-// be used sparingly and especially not in tight loops.
-//
-// Since retrieving the provider may fail (e.g. if the provider is accessed
-// over an RPC channel that has operational problems), this function will
-// return false if the schema cannot be retrieved, under the assumption that
-// a subsequent call to do anything with the data source would fail
-// anyway.
-func ProviderHasDataSource(provider Interface, dataSourceName string) bool {
-    resp := provider.GetSchema()
-    if resp.Diagnostics.HasErrors() {
-        return false
-    }
-
-    _, exists := resp.DataSources[dataSourceName]
-    return exists
-}
diff --git a/vendor/github.com/hashicorp/terraform/providers/provider.go b/vendor/github.com/hashicorp/terraform/providers/provider.go
deleted file mode 100644
index b27e3dbc..00000000
--- a/vendor/github.com/hashicorp/terraform/providers/provider.go
+++ /dev/null
@@ -1,386 +0,0 @@
-package providers
-
-import (
-    "github.com/zclconf/go-cty/cty"
-
-    "github.com/hashicorp/terraform/configs/configschema"
-    "github.com/hashicorp/terraform/states"
-    "github.com/hashicorp/terraform/tfdiags"
-)
-
-// Interface represents the set of methods required for a complete resource
-// provider plugin.
-type Interface interface {
-    // GetSchema returns the complete schema for the provider.
-    GetSchema() GetSchemaResponse
-
-    // PrepareProviderConfig allows the provider to validate the configuration
-    // values, and set or override any values with defaults.
-    PrepareProviderConfig(PrepareProviderConfigRequest) PrepareProviderConfigResponse
-
-    // ValidateResourceTypeConfig allows the provider to validate the resource
-    // configuration values.
-    ValidateResourceTypeConfig(ValidateResourceTypeConfigRequest) ValidateResourceTypeConfigResponse
-
-    // ValidateDataSourceConfig allows the provider to validate the data source
-    // configuration values.
-    ValidateDataSourceConfig(ValidateDataSourceConfigRequest) ValidateDataSourceConfigResponse
-
-    // UpgradeResourceState is called when the state loader encounters an
-    // instance state whose schema version is less than the one reported by the
-    // currently-used version of the corresponding provider, and the upgraded
-    // result is used for any further processing.
-    UpgradeResourceState(UpgradeResourceStateRequest) UpgradeResourceStateResponse
-
-    // Configure configures and initializes the provider.
-    Configure(ConfigureRequest) ConfigureResponse
-
-    // Stop is called when the provider should halt any in-flight actions.
-    //
-    // Stop should not block waiting for in-flight actions to complete. It
-    // should take any action it wants and return immediately acknowledging it
-    // has received the stop request. Terraform will not make any further API
-    // calls to the provider after Stop is called.
-    //
-    // The error returned, if non-nil, is assumed to mean that signaling the
-    // stop somehow failed and that the user should expect potentially waiting
-    // a longer period of time.
-    Stop() error
-
-    // ReadResource refreshes a resource and returns its current state.
-    ReadResource(ReadResourceRequest) ReadResourceResponse
-
-    // PlanResourceChange takes the current state and proposed state of a
-    // resource, and returns the planned final state.
-    PlanResourceChange(PlanResourceChangeRequest) PlanResourceChangeResponse
-
-    // ApplyResourceChange takes the planned state for a resource, which may
-    // yet contain unknown computed values, and applies the changes returning
-    // the final state.
-    ApplyResourceChange(ApplyResourceChangeRequest) ApplyResourceChangeResponse
-
-    // ImportResourceState requests that the given resource be imported.
-    ImportResourceState(ImportResourceStateRequest) ImportResourceStateResponse
-
-    // ReadDataSource returns the data source's current state.
-    ReadDataSource(ReadDataSourceRequest) ReadDataSourceResponse
-
-    // Close shuts down the plugin process if applicable.
-    Close() error
-}
-
-type GetSchemaResponse struct {
-    // Provider is the schema for the provider itself.
-    Provider Schema
-
-    // ProviderMeta is the schema for the provider's meta info in a module.
-    ProviderMeta Schema
-
-    // ResourceTypes map the resource type name to that type's schema.
-    ResourceTypes map[string]Schema
-
-    // DataSources maps the data source name to that data source's schema.
-    DataSources map[string]Schema
-
-    // Diagnostics contains any warnings or errors from the method call.
-    Diagnostics tfdiags.Diagnostics
-}
-
-// Schema pairs a provider or resource schema with that schema's version.
-// This is used to be able to upgrade the schema in UpgradeResourceState.
-type Schema struct {
-    Version int64
-    Block   *configschema.Block
-}
-
-type PrepareProviderConfigRequest struct {
-    // Config is the raw configuration value for the provider.
-    Config cty.Value
-}
-
-type PrepareProviderConfigResponse struct {
-    // PreparedConfig is the configuration as prepared by the provider.
-    PreparedConfig cty.Value
-    // Diagnostics contains any warnings or errors from the method call.
-    Diagnostics tfdiags.Diagnostics
-}
-
-type ValidateResourceTypeConfigRequest struct {
-    // TypeName is the name of the resource type to validate.
-    TypeName string
-
-    // Config is the configuration value to validate, which may contain unknown
-    // values.
-    Config cty.Value
-}
-
-type ValidateResourceTypeConfigResponse struct {
-    // Diagnostics contains any warnings or errors from the method call.
-    Diagnostics tfdiags.Diagnostics
-}
-
-type ValidateDataSourceConfigRequest struct {
-    // TypeName is the name of the data source type to validate.
-    TypeName string
-
-    // Config is the configuration value to validate, which may contain unknown
-    // values.
-    Config cty.Value
-}
-
-type ValidateDataSourceConfigResponse struct {
-    // Diagnostics contains any warnings or errors from the method call.
-    Diagnostics tfdiags.Diagnostics
-}
-
-type UpgradeResourceStateRequest struct {
-    // TypeName is the name of the resource type being upgraded.
-    TypeName string
-
-    // Version is the version of the schema that created the current state.
-    Version int64
-
-    // RawStateJSON and RawStateFlatmap contain the state that needs to be
-    // upgraded to match the current schema version. Because the schema is
-    // unknown, this contains only the raw data as stored in the state.
-    // RawStateJSON is the current json state encoding.
-    // RawStateFlatmap is the legacy flatmap encoding.
-    // Only one of these fields may be set for the upgrade request.
-    RawStateJSON    []byte
-    RawStateFlatmap map[string]string
-}
-
-type UpgradeResourceStateResponse struct {
-    // UpgradedState is the newly upgraded resource state.
-    UpgradedState cty.Value
-
-    // Diagnostics contains any warnings or errors from the method call.
-    Diagnostics tfdiags.Diagnostics
-}
-
-type ConfigureRequest struct {
-    // TerraformVersion is the version string from the running instance of
-    // terraform. Providers can use TerraformVersion to verify compatibility,
-    // and to store for informational purposes.
-    TerraformVersion string
-
-    // Config is the complete configuration value for the provider.
-    Config cty.Value
-}
-
-type ConfigureResponse struct {
-    // Diagnostics contains any warnings or errors from the method call.
-    Diagnostics tfdiags.Diagnostics
-}
-
-type ReadResourceRequest struct {
-    // TypeName is the name of the resource type being read.
-    TypeName string
-
-    // PriorState contains the previously saved state value for this resource.
-    PriorState cty.Value
-
-    // Private is an opaque blob that will be stored in state along with the
-    // resource. It is intended only for interpretation by the provider itself.
-    Private []byte
-
-    // ProviderMeta is the configuration for the provider_meta block for the
-    // module and provider this resource belongs to. Its use is defined by
-    // each provider, and it should not be used without coordination with
-    // HashiCorp. It is considered experimental and subject to change.
-    ProviderMeta cty.Value
-}
-
-type ReadResourceResponse struct {
-    // NewState contains the current state of the resource.
-    NewState cty.Value
-
-    // Diagnostics contains any warnings or errors from the method call.
-    Diagnostics tfdiags.Diagnostics
-
-    // Private is an opaque blob that will be stored in state along with the
-    // resource. It is intended only for interpretation by the provider itself.
-    Private []byte
-}
-
-type PlanResourceChangeRequest struct {
-    // TypeName is the name of the resource type to plan.
-    TypeName string
-
-    // PriorState is the previously saved state value for this resource.
-    PriorState cty.Value
-
-    // ProposedNewState is the expected state after the new configuration is
-    // applied. This is created by directly applying the configuration to the
-    // PriorState. The provider is then responsible for applying any further
-    // changes required to create the proposed final state.
-    ProposedNewState cty.Value
-
-    // Config is the resource configuration, before being merged with the
-    // PriorState. Any value not explicitly set in the configuration will be
-    // null. Config is supplied for reference, but Provider implementations
-    // should prefer the ProposedNewState in most circumstances.
-    Config cty.Value
-
-    // PriorPrivate is the previously saved private data returned from the
-    // provider during the last apply.
-    PriorPrivate []byte
-
-    // ProviderMeta is the configuration for the provider_meta block for the
-    // module and provider this resource belongs to. Its use is defined by
-    // each provider, and it should not be used without coordination with
-    // HashiCorp. It is considered experimental and subject to change.
-    ProviderMeta cty.Value
-}
-
-type PlanResourceChangeResponse struct {
-    // PlannedState is the expected state of the resource once the current
-    // configuration is applied.
-    PlannedState cty.Value
-
-    // RequiresReplace is the list of attributes that require
-    // resource replacement.
-    RequiresReplace []cty.Path
-
-    // PlannedPrivate is an opaque blob that is not interpreted by terraform
-    // core. This will be saved and relayed back to the provider during
-    // ApplyResourceChange.
-    PlannedPrivate []byte
-
-    // Diagnostics contains any warnings or errors from the method call.
-    Diagnostics tfdiags.Diagnostics
-
-    // LegacyTypeSystem is set only if the provider is using the legacy SDK
-    // whose type system cannot be precisely mapped into the Terraform type
-    // system. We use this to bypass certain consistency checks that would
-    // otherwise fail due to this imprecise mapping. No other provider or SDK
-    // implementation is permitted to set this.
-    LegacyTypeSystem bool
-}
-
-type ApplyResourceChangeRequest struct {
-    // TypeName is the name of the resource type being applied.
-    TypeName string
-
-    // PriorState is the current state of the resource.
-    PriorState cty.Value
-
-    // PlannedState is the state returned from PlanResourceChange, and should
-    // represent the new state, minus any remaining computed attributes.
-    PlannedState cty.Value
-
-    // Config is the resource configuration, before being merged with the
-    // PriorState. Any value not explicitly set in the configuration will be
-    // null. Config is supplied for reference, but Provider implementations
-    // should prefer the PlannedState in most circumstances.
-    Config cty.Value
-
-    // PlannedPrivate is the same value as returned by PlanResourceChange.
-    PlannedPrivate []byte
-
-    // ProviderMeta is the configuration for the provider_meta block for the
-    // module and provider this resource belongs to. Its use is defined by
-    // each provider, and it should not be used without coordination with
-    // HashiCorp. It is considered experimental and subject to change.
-    ProviderMeta cty.Value
-}
-
-type ApplyResourceChangeResponse struct {
-    // NewState is the new complete state after applying the planned change.
-    // In the event of an error, NewState should represent the most recent
-    // known state of the resource, if it exists.
-    NewState cty.Value
-
-    // Private is an opaque blob that will be stored in state along with the
-    // resource. It is intended only for interpretation by the provider itself.
-    Private []byte
-
-    // Diagnostics contains any warnings or errors from the method call.
-    Diagnostics tfdiags.Diagnostics
-
-    // LegacyTypeSystem is set only if the provider is using the legacy SDK
-    // whose type system cannot be precisely mapped into the Terraform type
-    // system. We use this to bypass certain consistency checks that would
-    // otherwise fail due to this imprecise mapping. No other provider or SDK
-    // implementation is permitted to set this.
-    LegacyTypeSystem bool
-}
-
-type ImportResourceStateRequest struct {
-    // TypeName is the name of the resource type to be imported.
-    TypeName string
-
-    // ID is a string with which the provider can identify the resource to be
-    // imported.
-    ID string
-}
-
-type ImportResourceStateResponse struct {
-    // ImportedResources contains one or more state values related to the
-    // imported resource. It is not required that these be complete, only that
-    // there is enough identifying information for the provider to successfully
-    // update the states in ReadResource.
-    ImportedResources []ImportedResource
-
-    // Diagnostics contains any warnings or errors from the method call.
-    Diagnostics tfdiags.Diagnostics
-}
-
-// ImportedResource represents an object being imported into Terraform with the
-// help of a provider.
-// ImportedResource represents an object being imported into Terraform with the
-// help of a provider. An ImportedResource is a remote object that has been read
-// by the provider's import handler but hasn't yet been committed to state.
-type ImportedResource struct {
- // TypeName is the name of the resource type associated with the
- // returned state. It's possible for providers to import multiple related
- // types with a single import request.
- TypeName string
-
- // State is the state of the remote object being imported. This may not be
- // complete, but must contain enough information to uniquely identify the
- // resource.
- State cty.Value
-
- // Private is an opaque blob that will be stored in state along with the
- // resource. It is intended only for interpretation by the provider itself.
- Private []byte
-}
-
-// AsInstanceObject converts the receiving ImportedResource into a
-// ResourceInstanceObject that has status ObjectReady.
-//
-// The returned object does not know its own resource type, so the caller must
-// retain the TypeName value from the source object if this information is
-// needed.
-//
-// The returned object also has no dependency addresses, but the caller may
-// freely modify the direct fields of the returned object without affecting
-// the receiver.
-func (ir ImportedResource) AsInstanceObject() *states.ResourceInstanceObject {
- return &states.ResourceInstanceObject{
- Status: states.ObjectReady,
- Value: ir.State,
- Private: ir.Private,
- }
-}
-
-type ReadDataSourceRequest struct {
- // TypeName is the name of the data source type to read.
- TypeName string
-
- // Config is the complete configuration for the requested data source.
- Config cty.Value
-
- // ProviderMeta is the configuration for the provider_meta block for the
- // module and provider this resource belongs to. Its use is defined by
- // each provider, and it should not be used without coordination with
- // HashiCorp. It is considered experimental and subject to change.
- ProviderMeta cty.Value
-}
-
-type ReadDataSourceResponse struct {
- // State is the current state of the requested data source.
- State cty.Value
-
- // Diagnostics contains any warnings or errors from the method call.
- Diagnostics tfdiags.Diagnostics
-}
diff --git a/vendor/github.com/hashicorp/terraform/provisioners/doc.go b/vendor/github.com/hashicorp/terraform/provisioners/doc.go
deleted file mode 100644
index b03ba9a1..00000000
--- a/vendor/github.com/hashicorp/terraform/provisioners/doc.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// Package provisioners contains the interface and primary types to implement a
-// Terraform resource provisioner.
-package provisioners
diff --git a/vendor/github.com/hashicorp/terraform/provisioners/factory.go b/vendor/github.com/hashicorp/terraform/provisioners/factory.go
deleted file mode 100644
index 590b97a8..00000000
--- a/vendor/github.com/hashicorp/terraform/provisioners/factory.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package provisioners
-
-// Factory is a function type that creates a new instance of a resource
-// provisioner, or returns an error if that is impossible.
-type Factory func() (Interface, error)
-
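A Factory will typically wrap a constructor for a concrete implementation, along these lines (newExampleProvisioner is hypothetical, standing in for any constructor whose result satisfies Interface):

var factory Factory = func() (Interface, error) {
	// newExampleProvisioner is a placeholder constructor; any value
	// implementing Interface would do here.
	return newExampleProvisioner(), nil
}
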
-// FactoryFixed is a helper that creates a Factory that just returns some given
-// single provisioner.
-//
-// Unlike usual factories, the exact same instance is returned for each call
-// to the factory and so this must be used in only specialized situations where
-// the caller can take care to either not mutate the given provisioner at all
-// or to mutate it in ways that will not cause unexpected behavior for others
-// holding the same reference.
-func FactoryFixed(p Interface) Factory {
- return func() (Interface, error) {
- return p, nil
- }
-}
diff --git a/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go b/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go
deleted file mode 100644
index e53c8848..00000000
--- a/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package provisioners
-
-import (
- "github.com/hashicorp/terraform/configs/configschema"
- "github.com/hashicorp/terraform/tfdiags"
- "github.com/zclconf/go-cty/cty"
-)
-
-// Interface is the set of methods required for a resource provisioner plugin.
-type Interface interface {
- // GetSchema returns the schema for the provisioner configuration.
- GetSchema() GetSchemaResponse
-
- // ValidateProvisionerConfig allows the provisioner to validate the
- // configuration values.
- ValidateProvisionerConfig(ValidateProvisionerConfigRequest) ValidateProvisionerConfigResponse
-
- // ProvisionResource runs the provisioner with the provided configuration.
- // ProvisionResource blocks until the execution is complete.
- // If the returned diagnostics contain any errors, the resource will be
- // left in a tainted state.
- ProvisionResource(ProvisionResourceRequest) ProvisionResourceResponse
-
- // Stop is called to interrupt the provisioner.
- //
- // Stop should not block waiting for in-flight actions to complete. It
- // should take any action it wants and return immediately acknowledging it
- // has received the stop request. Terraform will not make any further API
- // calls to the provisioner after Stop is called.
- //
- // The error returned, if non-nil, is assumed to mean that signaling the
- // stop failed and that the user should expect the operation to take
- // longer than usual to halt.
- Stop() error
-
- // Close shuts down the plugin process if applicable.
- Close() error
-}
-
-type GetSchemaResponse struct {
- // Provisioner contains the schema for this provisioner.
- Provisioner *configschema.Block
-
- // Diagnostics contains any warnings or errors from the method call.
- Diagnostics tfdiags.Diagnostics
-}
-
-// UIOutput provides the Output method for resource provisioner
-// plugins to write any output to the UI.
-//
-// Provisioners may call the Output method multiple times while Apply is in
-// progress. It is invalid to call Output after Apply returns.
-type UIOutput interface {
- Output(string)
-}
-
-type ValidateProvisionerConfigRequest struct {
- // Config is the complete configuration to be used for the provisioner.
- Config cty.Value
-}
-
-type ValidateProvisionerConfigResponse struct {
- // Diagnostics contains any warnings or errors from the method call.
- Diagnostics tfdiags.Diagnostics
-}
-
-type ProvisionResourceRequest struct {
- // Config is the complete provisioner configuration.
- Config cty.Value
-
- // Connection contains any information required to access the resource
- // instance.
- Connection cty.Value
-
- // UIOutput is used to return output during the Apply operation.
- UIOutput UIOutput
-}
-
-type ProvisionResourceResponse struct {
- // Diagnostics contains any warnings or errors from the method call.
- Diagnostics tfdiags.Diagnostics
-}
diff --git a/vendor/github.com/hashicorp/terraform/states/doc.go b/vendor/github.com/hashicorp/terraform/states/doc.go
deleted file mode 100644
index 7dd74ac7..00000000
--- a/vendor/github.com/hashicorp/terraform/states/doc.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// Package states contains the types that are used to represent Terraform
-// states.
-package states
diff --git a/vendor/github.com/hashicorp/terraform/states/instance_generation.go b/vendor/github.com/hashicorp/terraform/states/instance_generation.go
deleted file mode 100644
index 617ad4ea..00000000
--- a/vendor/github.com/hashicorp/terraform/states/instance_generation.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package states
-
-// Generation is used to represent multiple objects in a succession of objects
-// represented by a single resource instance address. A resource instance can
-// have multiple generations over its lifetime due to object replacement
-// (when a change can't be applied without destroying and re-creating), and
-// multiple generations can exist at the same time when create_before_destroy
-// is used.
-//
-// A Generation value can either be the value of the variable "CurrentGen" or
-// a value of type DeposedKey. Generation values can be compared for equality
-// using "==" and used as map keys. The zero value of Generation (nil) is not
-// a valid generation and must not be used.
-type Generation interface {
- generation()
-}
-
-// CurrentGen is the Generation representing the currently-active object for
-// a resource instance.
-var CurrentGen Generation
-
-type currentGen struct{}
-
-func (g currentGen) generation() {}
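Code that accepts a Generation typically distinguishes the two documented cases with an equality check and a type assertion, the same pattern ResourceInstance.GetGeneration uses later in this package. A minimal sketch:

func describeGeneration(gen Generation) string {
	if gen == CurrentGen {
		return "current object"
	}
	if dk, ok := gen.(DeposedKey); ok {
		return "deposed object " + string(dk)
	}
	// Per the documentation above, any other value is invalid.
	return "invalid generation"
}
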
diff --git a/vendor/github.com/hashicorp/terraform/states/instance_object.go b/vendor/github.com/hashicorp/terraform/states/instance_object.go
deleted file mode 100644
index d1b53e29..00000000
--- a/vendor/github.com/hashicorp/terraform/states/instance_object.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package states
-
-import (
- "github.com/zclconf/go-cty/cty"
- ctyjson "github.com/zclconf/go-cty/cty/json"
-
- "github.com/hashicorp/terraform/addrs"
-)
-
-// ResourceInstanceObject is the local representation of a specific remote
-// object associated with a resource instance. In practice not all remote
-// objects are actually remote in the sense of being accessed over the network,
-// but this is the most common case.
-//
-// It is not valid to mutate a ResourceInstanceObject once it has been created.
-// Instead, create a new object and replace the existing one.
-type ResourceInstanceObject struct {
- // Value is the object-typed value representing the remote object within
- // Terraform.
- Value cty.Value
-
- // Private is an opaque value set by the provider when this object was
- // last created or updated. Terraform Core does not use this value in
- // any way and it is not exposed anywhere in the user interface, so
- // a provider can use it for retaining any necessary private state.
- Private []byte
-
- // Status represents the "readiness" of the object as of the last time
- // it was updated.
- Status ObjectStatus
-
- // Dependencies is a set of absolute addresses of other resources this
- // instance depended on when it was applied. This is used to construct
- // the dependency relationships for an object whose configuration is no
- // longer available, such as if it has been removed from configuration
- // altogether, or is now deposed.
- Dependencies []addrs.ConfigResource
-
- // CreateBeforeDestroy reflects the status of the lifecycle
- // create_before_destroy option when this instance was last updated.
- // Because create_before_destroy also affects the overall ordering of the
- // destroy operations, we need to record the status to ensure a resource
- // removed from the config will still be destroyed in the same manner.
- CreateBeforeDestroy bool
-}
-
-// ObjectStatus represents the status of a RemoteObject.
-type ObjectStatus rune
-
-//go:generate go run golang.org/x/tools/cmd/stringer -type ObjectStatus
-
-const (
- // ObjectReady is an object status for an object that is ready to use.
- ObjectReady ObjectStatus = 'R'
-
- // ObjectTainted is an object status representing an object that is in
- // an unrecoverable bad state due to a partial failure during a create,
- // update, or delete operation. Since it cannot be moved into the
- // ObjectReady state, a tainted object must be replaced.
- ObjectTainted ObjectStatus = 'T'
-
- // ObjectPlanned is a special object status used only for the transient
- // placeholder objects we place into state during the refresh and plan
- // walks to stand in for objects that will be created during apply.
- //
- // Any object of this status must have a corresponding change recorded
- // in the current plan, whose value must then be used in preference to
- // the value stored in state when evaluating expressions. A planned
- // object stored in state will be incomplete if any of its attributes are
- // not yet known, and the plan must be consulted in order to "see" those
- // unknown values, because the state is not able to represent them.
- ObjectPlanned ObjectStatus = 'P'
-)
-
-// Encode marshals the value within the receiver to produce a
-// ResourceInstanceObjectSrc ready to be written to a state file.
-//
-// The given type must be the implied type of the resource type schema, and
-// the given value must conform to it. It is important to pass the schema
-// type and not the object's own type so that dynamically-typed attributes
-// will be stored correctly. The caller must also provide the version number
-// of the schema that the given type was derived from, which will be recorded
-// in the source object so it can be used to detect when schema migration is
-// required on read.
-//
-// The returned object may share internal references with the receiver and
-// so the caller must not mutate the receiver any further once this
-// method is called.
-func (o *ResourceInstanceObject) Encode(ty cty.Type, schemaVersion uint64) (*ResourceInstanceObjectSrc, error) {
- // Our state serialization can't represent unknown values, so we convert
- // them to nulls here. This is lossy, but nobody should be writing unknown
- // values here and expecting to get them out again later.
- //
- // We get unknown values here while we're building out a "planned state"
- // during the plan phase, but the value stored in the plan takes precedence
- // for expression evaluation. The apply step should never produce unknown
- // values, but if it does it's the responsibility of the caller to detect
- // and raise an error about that.
- val := cty.UnknownAsNull(o.Value)
-
- src, err := ctyjson.Marshal(val, ty)
- if err != nil {
- return nil, err
- }
-
- return &ResourceInstanceObjectSrc{
- SchemaVersion: schemaVersion,
- AttrsJSON: src,
- Private: o.Private,
- Status: o.Status,
- Dependencies: o.Dependencies,
- CreateBeforeDestroy: o.CreateBeforeDestroy,
- }, nil
-}
-
-// AsTainted returns a deep copy of the receiver with the status updated to
-// ObjectTainted.
-func (o *ResourceInstanceObject) AsTainted() *ResourceInstanceObject {
- if o == nil {
- // A nil object can't be tainted, but we'll allow this anyway to
- // avoid a crash, since we presumably intend to eventually record
- // the object as having been deleted anyway.
- return nil
- }
- ret := o.DeepCopy()
- ret.Status = ObjectTainted
- return ret
-}
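Typical use of Encode pairs a value with the implied type of the resource's schema and the schema version. A short sketch, assuming a hand-built cty object type standing in for a real provider schema:

func encodeExample(obj *ResourceInstanceObject) ([]byte, error) {
	// The type must come from the resource type schema, not from the
	// value itself, so dynamically-typed attributes round-trip correctly.
	ty := cty.Object(map[string]cty.Type{
		"id":   cty.String,
		"name": cty.String,
	})
	src, err := obj.Encode(ty, 1) // records schema version 1 in the result
	if err != nil {
		return nil, err
	}
	return src.AttrsJSON, nil // JSON bytes ready for a state file
}
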
diff --git a/vendor/github.com/hashicorp/terraform/states/instance_object_src.go b/vendor/github.com/hashicorp/terraform/states/instance_object_src.go
deleted file mode 100644
index bf790db0..00000000
--- a/vendor/github.com/hashicorp/terraform/states/instance_object_src.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package states
-
-import (
- "github.com/zclconf/go-cty/cty"
- ctyjson "github.com/zclconf/go-cty/cty/json"
-
- "github.com/hashicorp/terraform/addrs"
- "github.com/hashicorp/terraform/configs/hcl2shim"
-)
-
-// ResourceInstanceObjectSrc is a not-fully-decoded version of
-// ResourceInstanceObject. Decoding of it can be completed by first handling
-// any schema migration steps to get to the latest schema version and then
-// calling method Decode with the implied type of the latest schema.
-type ResourceInstanceObjectSrc struct {
- // SchemaVersion is the resource-type-specific schema version number that
- // was current when either AttrsJSON or AttrsFlat was encoded. Migration
- // steps are required if this is less than the current version number
- // reported by the corresponding provider.
- SchemaVersion uint64
-
- // AttrsJSON is a JSON-encoded representation of the object attributes,
- // encoding the value (of the object type implied by the associated resource
- // type schema) that represents this remote object in Terraform Language
- // expressions, and is compared with configuration when producing a diff.
- //
- // This is retained in JSON format here because it may require preprocessing
- // before decoding if, for example, the stored attributes are for an older
- // schema version which the provider must upgrade before use. If the
- // version is current, it is valid to simply decode this using the
- // type implied by the current schema, without the need for the provider
- // to perform an upgrade first.
- //
- // When writing a ResourceInstanceObject into the state, AttrsJSON should
- // always be conformant to the current schema version and the current
- // schema version should be recorded in the SchemaVersion field.
- AttrsJSON []byte
-
- // AttrsFlat is a legacy form of attributes used in older state file
- // formats, and in the new state format for objects that haven't yet been
- // upgraded. This attribute is mutually exclusive with AttrsJSON: for any
- // ResourceInstanceObjectSrc, only one of these attributes may be populated
- // and the other must be nil.
- //
- // An instance object with this field populated should be upgraded to use
- // AttrsJSON at the earliest opportunity, since this legacy flatmap-based
- // format will be phased out over time. AttrsFlat should not be used when
- // writing new or updated objects to state; instead, callers must follow
- // the recommendations in the AttrsJSON documentation above.
- AttrsFlat map[string]string
-
- // These fields all correspond to the fields of the same name on
- // ResourceInstanceObject.
- Private []byte
- Status ObjectStatus
- Dependencies []addrs.ConfigResource
- CreateBeforeDestroy bool
-}
-
-// Decode unmarshals the raw representation of the object attributes. Pass the
-// implied type of the corresponding resource type schema for correct operation.
-//
-// Before calling Decode, the caller must check that the SchemaVersion field
-// exactly equals the version number of the schema whose implied type is being
-// passed, or else the result is undefined.
-//
-// The returned object may share internal references with the receiver and
-// so the caller must not mutate the receiver any further once this
-// method is called.
-func (os *ResourceInstanceObjectSrc) Decode(ty cty.Type) (*ResourceInstanceObject, error) {
- var val cty.Value
- var err error
- if os.AttrsFlat != nil {
- // Legacy mode. We'll do our best to unpick this from the flatmap.
- val, err = hcl2shim.HCL2ValueFromFlatmap(os.AttrsFlat, ty)
- if err != nil {
- return nil, err
- }
- } else {
- val, err = ctyjson.Unmarshal(os.AttrsJSON, ty)
- if err != nil {
- return nil, err
- }
- }
-
- return &ResourceInstanceObject{
- Value: val,
- Status: os.Status,
- Dependencies: os.Dependencies,
- Private: os.Private,
- CreateBeforeDestroy: os.CreateBeforeDestroy,
- }, nil
-}
-
-// CompleteUpgrade creates a new ResourceInstanceObjectSrc by copying the
-// metadata from the receiver and writing in the given new schema version
-// and attribute value that are presumed to have resulted from upgrading
-// from an older schema version.
-func (os *ResourceInstanceObjectSrc) CompleteUpgrade(newAttrs cty.Value, newType cty.Type, newSchemaVersion uint64) (*ResourceInstanceObjectSrc, error) {
- new := os.DeepCopy()
- new.AttrsFlat = nil // We always use JSON after an upgrade, even if the source used flatmap
-
- // This is the same principle as ResourceInstanceObject.Encode, but
- // avoiding a decode/re-encode cycle because we don't have type info
- // available for the "old" attributes.
- newAttrs = cty.UnknownAsNull(newAttrs)
- src, err := ctyjson.Marshal(newAttrs, newType)
- if err != nil {
- return nil, err
- }
-
- new.AttrsJSON = src
- new.SchemaVersion = newSchemaVersion
- return new, nil
-}
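Reading the two halves of this type together: a caller first compares SchemaVersion against the provider's current version, performs any upgrade, records it with CompleteUpgrade, and only then calls Decode. A condensed sketch of that sequence, where upgradeWithProvider is hypothetical and stands in for the provider's upgrade call:

func decodeAtLatest(src *ResourceInstanceObjectSrc, ty cty.Type, currentVersion uint64) (*ResourceInstanceObject, error) {
	if src.SchemaVersion < currentVersion {
		newAttrs, err := upgradeWithProvider(src, currentVersion) // hypothetical provider call
		if err != nil {
			return nil, err
		}
		src, err = src.CompleteUpgrade(newAttrs, ty, currentVersion)
		if err != nil {
			return nil, err
		}
	}
	return src.Decode(ty)
}
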
diff --git a/vendor/github.com/hashicorp/terraform/states/module.go b/vendor/github.com/hashicorp/terraform/states/module.go
deleted file mode 100644
index 760625e0..00000000
--- a/vendor/github.com/hashicorp/terraform/states/module.go
+++ /dev/null
@@ -1,323 +0,0 @@
-package states
-
-import (
- "github.com/zclconf/go-cty/cty"
-
- "github.com/hashicorp/terraform/addrs"
-)
-
-// Module is a container for the states of objects within a particular module.
-type Module struct {
- Addr addrs.ModuleInstance
-
- // Resources contains the state for each resource. The keys in this map are
- // an implementation detail and must not be used by outside callers.
- Resources map[string]*Resource
-
- // OutputValues contains the state for each output value. The keys in this
- // map are output value names.
- OutputValues map[string]*OutputValue
-
- // LocalValues contains the value for each named local value. The keys
- // in this map are local value names.
- LocalValues map[string]cty.Value
-}
-
-// NewModule constructs an empty module state for the given module address.
-func NewModule(addr addrs.ModuleInstance) *Module {
- return &Module{
- Addr: addr,
- Resources: map[string]*Resource{},
- OutputValues: map[string]*OutputValue{},
- LocalValues: map[string]cty.Value{},
- }
-}
-
-// Resource returns the state for the resource with the given address within
-// the receiving module state, or nil if the requested resource is not tracked
-// in the state.
-func (ms *Module) Resource(addr addrs.Resource) *Resource {
- return ms.Resources[addr.String()]
-}
-
-// ResourceInstance returns the state for the resource instance with the given
-// address within the receiving module state, or nil if the requested instance
-// is not tracked in the state.
-func (ms *Module) ResourceInstance(addr addrs.ResourceInstance) *ResourceInstance {
- rs := ms.Resource(addr.Resource)
- if rs == nil {
- return nil
- }
- return rs.Instance(addr.Key)
-}
-
-// SetResourceProvider updates the resource-level metadata for the resource
-// with the given address, creating the resource state for it if it doesn't
-// already exist.
-func (ms *Module) SetResourceProvider(addr addrs.Resource, provider addrs.AbsProviderConfig) {
- rs := ms.Resource(addr)
- if rs == nil {
- rs = &Resource{
- Addr: addr.Absolute(ms.Addr),
- Instances: map[addrs.InstanceKey]*ResourceInstance{},
- }
- ms.Resources[addr.String()] = rs
- }
-
- rs.ProviderConfig = provider
-}
-
-// RemoveResource removes the entire state for the given resource, taking with
-// it any instances associated with the resource. This should generally be
-// called only for resource objects whose instances have all been destroyed.
-func (ms *Module) RemoveResource(addr addrs.Resource) {
- delete(ms.Resources, addr.String())
-}
-
-// SetResourceInstanceCurrent saves the given instance object as the current
-// generation of the resource instance with the given address, simultaneously
-// updating the recorded provider configuration address and dependencies.
-//
-// Any existing current instance object for the given resource is overwritten.
-// Set obj to nil to remove the primary generation object altogether. If there
-// are no deposed objects then the instance will be removed altogether.
-//
-// The provider address is a resource-wide setting and so it is updated for all
-// other instances of the same resource as a side-effect of this call.
-func (ms *Module) SetResourceInstanceCurrent(addr addrs.ResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
- rs := ms.Resource(addr.Resource)
- // If both the resource and the object are nil, there is nothing to do.
- if obj == nil && rs == nil {
- return
- }
- if obj == nil && rs != nil {
- // If the resource has no instances at all, delete the whole resource.
- if len(rs.Instances) == 0 {
- delete(ms.Resources, addr.Resource.String())
- return
- }
- // Check for an existing instance, now that we know rs.Instances is
- // non-empty.
- is := rs.Instance(addr.Key)
- if is == nil {
- // There is no instance with this address and obj is nil, so there
- // is nothing to change.
- return
- }
- // We have an instance, so update its current object.
- is.Current = obj
- if !is.HasObjects() {
- // If we have no objects at all then we'll clean up.
- delete(rs.Instances, addr.Key)
- // Delete the resource altogether if it no longer has any instances.
- if len(rs.Instances) == 0 {
- delete(ms.Resources, addr.Resource.String())
- return
- }
- }
- // Nothing more to do here.
- return
- }
- if rs == nil && obj != nil {
- // We don't have a resource yet, so create one as a side effect of
- // SetResourceProvider.
- ms.SetResourceProvider(addr.Resource, provider)
- // Now that the resource exists, update rs to point to it.
- rs = ms.Resource(addr.Resource)
- }
- // Get our instance from the resource; it could be there or not at this point.
- is := rs.Instance(addr.Key)
- if is == nil {
- // We don't have an instance yet, so create one and add it to the
- // resource's instances.
- is = rs.CreateInstance(addr.Key)
- // Update the resource metadata because we have a new instance.
- ms.SetResourceProvider(addr.Resource, provider)
- }
- // Update the resource's ProviderConfig, in case the provider has updated.
- rs.ProviderConfig = provider
- is.Current = obj
-}
-
-// SetResourceInstanceDeposed saves the given instance object as a deposed
-// generation of the resource instance with the given address and deposed key.
-//
-// Call this method only for pre-existing deposed objects that already have
-// a known DeposedKey. For example, this method is useful if reloading objects
-// that were persisted to a state file. To mark the current object as deposed,
-// use DeposeResourceInstanceObject instead.
-//
-// The resource that contains the given instance must already exist in the
-// state, or this method will panic. Use Resource to check first if its
-// presence is not already guaranteed.
-//
-// Any existing deposed instance object for the given resource and deposed key
-// is overwritten. Set obj to nil to remove the deposed object altogether. If
-// the instance is left with no objects after this operation then it will
-// be removed from its containing resource altogether.
-func (ms *Module) SetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
- ms.SetResourceProvider(addr.Resource, provider)
-
- rs := ms.Resource(addr.Resource)
- is := rs.EnsureInstance(addr.Key)
- if obj != nil {
- is.Deposed[key] = obj
- } else {
- delete(is.Deposed, key)
- }
-
- if !is.HasObjects() {
- // If we have no objects at all then we'll clean up.
- delete(rs.Instances, addr.Key)
- }
- if len(rs.Instances) == 0 {
- // Also remove the resource altogether if it no longer has any
- // instances.
- delete(ms.Resources, addr.Resource.String())
- }
-}
-
-// ForgetResourceInstanceAll removes the record of all objects associated with
-// the specified resource instance, if present. If not present, this is a no-op.
-func (ms *Module) ForgetResourceInstanceAll(addr addrs.ResourceInstance) {
- rs := ms.Resource(addr.Resource)
- if rs == nil {
- return
- }
- delete(rs.Instances, addr.Key)
-
- if len(rs.Instances) == 0 {
- // Also remove the resource altogether if it no longer has any
- // instances.
- delete(ms.Resources, addr.Resource.String())
- }
-}
-
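Taken together, these methods give a module state a simple write-then-forget lifecycle. A minimal sketch, where instAddr, objSrc, and providerAddr are assumed to have been built elsewhere:

func lifecycleExample(instAddr addrs.ResourceInstance, objSrc *ResourceInstanceObjectSrc, providerAddr addrs.AbsProviderConfig) {
	ms := NewModule(addrs.RootModuleInstance)

	// Record the current object for an instance (this also records the
	// provider configuration on the containing resource).
	ms.SetResourceInstanceCurrent(instAddr, objSrc, providerAddr)

	// Later, after the remote object is destroyed: a nil object prunes the
	// instance, and the whole resource once no instances remain.
	ms.SetResourceInstanceCurrent(instAddr, nil, providerAddr)
}
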
-// ForgetResourceInstanceDeposed removes the record of the deposed object with
-// the given address and key, if present. If not present, this is a no-op.
-func (ms *Module) ForgetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) {
- rs := ms.Resource(addr.Resource)
- if rs == nil {
- return
- }
- is := rs.Instance(addr.Key)
- if is == nil {
- return
- }
- delete(is.Deposed, key)
-
- if !is.HasObjects() {
- // If we have no objects at all then we'll clean up.
- delete(rs.Instances, addr.Key)
- }
- if len(rs.Instances) == 0 {
- // Also remove the resource altogether if it no longer has any
- // instances.
- delete(ms.Resources, addr.Resource.String())
- }
-}
-
-// deposeResourceInstanceObject is the real implementation of
-// SyncState.DeposeResourceInstanceObject.
-func (ms *Module) deposeResourceInstanceObject(addr addrs.ResourceInstance, forceKey DeposedKey) DeposedKey {
- is := ms.ResourceInstance(addr)
- if is == nil {
- return NotDeposed
- }
- return is.deposeCurrentObject(forceKey)
-}
-
-// maybeRestoreResourceInstanceDeposed is the real implementation of
-// SyncState.MaybeRestoreResourceInstanceDeposed.
-func (ms *Module) maybeRestoreResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) bool {
- rs := ms.Resource(addr.Resource)
- if rs == nil {
- return false
- }
- is := rs.Instance(addr.Key)
- if is == nil {
- return false
- }
- if is.Current != nil {
- return false
- }
- // Guard against a key that is not actually present, which would
- // otherwise silently set Current to nil while reporting success.
- obj, exists := is.Deposed[key]
- if !exists {
- return false
- }
- is.Current = obj
- delete(is.Deposed, key)
- return true
-}
-
-// SetOutputValue writes an output value into the state, overwriting any
-// existing value of the same name.
-func (ms *Module) SetOutputValue(name string, value cty.Value, sensitive bool) *OutputValue {
- os := &OutputValue{
- Addr: addrs.AbsOutputValue{
- Module: ms.Addr,
- OutputValue: addrs.OutputValue{
- Name: name,
- },
- },
- Value: value,
- Sensitive: sensitive,
- }
- ms.OutputValues[name] = os
- return os
-}
-
-// RemoveOutputValue removes the output value of the given name from the state,
-// if it exists. This method is a no-op if there is no value of the given
-// name.
-func (ms *Module) RemoveOutputValue(name string) {
- delete(ms.OutputValues, name)
-}
-
-// SetLocalValue writes a local value into the state, overwriting any
-// existing value of the same name.
-func (ms *Module) SetLocalValue(name string, value cty.Value) {
- ms.LocalValues[name] = value
-}
-
-// RemoveLocalValue removes the local value of the given name from the state,
-// if it exists. This method is a no-op if there is no value of the given
-// name.
-func (ms *Module) RemoveLocalValue(name string) {
- delete(ms.LocalValues, name)
-}
-
-// PruneResourceHusks is a specialized method that will remove any Resource
-// objects that do not contain any instances, even if they would normally be
-// retained because "count" or "for_each" is set.
-//
-// You probably shouldn't call this! See the method of the same name on
-// type State for more information on what this is for and the rare situations
-// where it is safe to use.
-func (ms *Module) PruneResourceHusks() {
- for _, rs := range ms.Resources {
- if len(rs.Instances) == 0 {
- ms.RemoveResource(rs.Addr.Resource)
- }
- }
-}
-
-// empty returns true if the receiving module state is contributing nothing
-// to the state. In other words, it returns true if the module could be
-// removed from the state altogether without changing the meaning of the state.
-// -// In practice a module containing no objects is the same as a non-existent -// module, and so we can opportunistically clean up once a module becomes -// empty on the assumption that it will be re-added if needed later. -func (ms *Module) empty() bool { - if ms == nil { - return true - } - - // This must be updated to cover any new collections added to Module - // in future. - return (len(ms.Resources) == 0 && - len(ms.OutputValues) == 0 && - len(ms.LocalValues) == 0) -} diff --git a/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go b/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go deleted file mode 100644 index 96a6db2f..00000000 --- a/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by "stringer -type ObjectStatus"; DO NOT EDIT. - -package states - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[ObjectReady-82] - _ = x[ObjectTainted-84] - _ = x[ObjectPlanned-80] -} - -const ( - _ObjectStatus_name_0 = "ObjectPlanned" - _ObjectStatus_name_1 = "ObjectReady" - _ObjectStatus_name_2 = "ObjectTainted" -) - -func (i ObjectStatus) String() string { - switch { - case i == 80: - return _ObjectStatus_name_0 - case i == 82: - return _ObjectStatus_name_1 - case i == 84: - return _ObjectStatus_name_2 - default: - return "ObjectStatus(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform/states/output_value.go b/vendor/github.com/hashicorp/terraform/states/output_value.go deleted file mode 100644 index 268420cf..00000000 --- a/vendor/github.com/hashicorp/terraform/states/output_value.go +++ /dev/null @@ -1,16 +0,0 @@ -package states - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/zclconf/go-cty/cty" -) - -// OutputValue represents the state of a particular output value. -// -// It is not valid to mutate an OutputValue object once it has been created. -// Instead, create an entirely new OutputValue to replace the previous one. -type OutputValue struct { - Addr addrs.AbsOutputValue - Value cty.Value - Sensitive bool -} diff --git a/vendor/github.com/hashicorp/terraform/states/resource.go b/vendor/github.com/hashicorp/terraform/states/resource.go deleted file mode 100644 index 0b6a4509..00000000 --- a/vendor/github.com/hashicorp/terraform/states/resource.go +++ /dev/null @@ -1,215 +0,0 @@ -package states - -import ( - "fmt" - "math/rand" - "time" - - "github.com/hashicorp/terraform/addrs" -) - -// Resource represents the state of a resource. -type Resource struct { - // Addr is the absolute address for the resource this state object - // belongs to. - Addr addrs.AbsResource - - // Instances contains the potentially-multiple instances associated with - // this resource. This map can contain a mixture of different key types, - // but only the ones of InstanceKeyType are considered current. - Instances map[addrs.InstanceKey]*ResourceInstance - - // ProviderConfig is the absolute address for the provider configuration that - // most recently managed this resource. This is used to connect a resource - // with a provider configuration when the resource configuration block is - // not available, such as if it has been removed from configuration - // altogether. 
- ProviderConfig addrs.AbsProviderConfig -} - -// Instance returns the state for the instance with the given key, or nil -// if no such instance is tracked within the state. -func (rs *Resource) Instance(key addrs.InstanceKey) *ResourceInstance { - return rs.Instances[key] -} - -// CreateInstance creates an instance and adds it to the resource -func (rs *Resource) CreateInstance(key addrs.InstanceKey) *ResourceInstance { - is := NewResourceInstance() - rs.Instances[key] = is - return is -} - -// EnsureInstance returns the state for the instance with the given key, -// creating a new empty state for it if one doesn't already exist. -// -// Because this may create and save a new state, it is considered to be -// a write operation. -func (rs *Resource) EnsureInstance(key addrs.InstanceKey) *ResourceInstance { - ret := rs.Instance(key) - if ret == nil { - ret = NewResourceInstance() - rs.Instances[key] = ret - } - return ret -} - -// ResourceInstance represents the state of a particular instance of a resource. -type ResourceInstance struct { - // Current, if non-nil, is the remote object that is currently represented - // by the corresponding resource instance. - Current *ResourceInstanceObjectSrc - - // Deposed, if len > 0, contains any remote objects that were previously - // represented by the corresponding resource instance but have been - // replaced and are pending destruction due to the create_before_destroy - // lifecycle mode. - Deposed map[DeposedKey]*ResourceInstanceObjectSrc -} - -// NewResourceInstance constructs and returns a new ResourceInstance, ready to -// use. -func NewResourceInstance() *ResourceInstance { - return &ResourceInstance{ - Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{}, - } -} - -// HasCurrent returns true if this resource instance has a "current"-generation -// object. Most instances do, but this can briefly be false during a -// create-before-destroy replace operation when the current has been deposed -// but its replacement has not yet been created. -func (i *ResourceInstance) HasCurrent() bool { - return i != nil && i.Current != nil -} - -// HasDeposed returns true if this resource instance has a deposed object -// with the given key. -func (i *ResourceInstance) HasDeposed(key DeposedKey) bool { - return i != nil && i.Deposed[key] != nil -} - -// HasAnyDeposed returns true if this resource instance has one or more -// deposed objects. -func (i *ResourceInstance) HasAnyDeposed() bool { - return i != nil && len(i.Deposed) > 0 -} - -// HasObjects returns true if this resource has any objects at all, whether -// current or deposed. -func (i *ResourceInstance) HasObjects() bool { - return i.Current != nil || len(i.Deposed) != 0 -} - -// deposeCurrentObject is part of the real implementation of -// SyncState.DeposeResourceInstanceObject. The exported method uses a lock -// to ensure that we can safely allocate an unused deposed key without -// collision. -func (i *ResourceInstance) deposeCurrentObject(forceKey DeposedKey) DeposedKey { - if !i.HasCurrent() { - return NotDeposed - } - - key := forceKey - if key == NotDeposed { - key = i.findUnusedDeposedKey() - } else { - if _, exists := i.Deposed[key]; exists { - panic(fmt.Sprintf("forced key %s is already in use", forceKey)) - } - } - i.Deposed[key] = i.Current - i.Current = nil - return key -} - -// GetGeneration retrieves the object of the given generation from the -// ResourceInstance, or returns nil if there is no such object. -// -// If the given generation is nil or invalid, this method will panic. 
-func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObjectSrc {
- if gen == CurrentGen {
- return i.Current
- }
- if dk, ok := gen.(DeposedKey); ok {
- return i.Deposed[dk]
- }
- if gen == nil {
- panic("get with nil Generation")
- }
- // Should never fall out here, since the above covers all possible
- // Generation values.
- panic(fmt.Sprintf("get invalid Generation %#v", gen))
-}
-
-// FindUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to
-// already be in use for this instance at the time of the call.
-//
-// Note that the validity of this result may change if new deposed keys are
-// allocated before it is used. To avoid this risk, instead use the
-// DeposeResourceInstanceObject method on the SyncState wrapper type, which
-// allocates a key and uses it atomically.
-func (i *ResourceInstance) FindUnusedDeposedKey() DeposedKey {
- return i.findUnusedDeposedKey()
-}
-
-// findUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to
-// already be in use for this instance.
-func (i *ResourceInstance) findUnusedDeposedKey() DeposedKey {
- for {
- key := NewDeposedKey()
- if _, exists := i.Deposed[key]; !exists {
- return key
- }
- // Spin until we find a unique one. This shouldn't take long, because
- // we have a 32-bit keyspace and there's rarely more than one deposed
- // instance.
- }
-}
-
-// DeposedKey is an 8-character hex string used to uniquely identify deposed
-// instance objects in the state.
-type DeposedKey string
-
-// NotDeposed is a special invalid value of DeposedKey that is used to represent
-// the absence of a deposed key. It must not be used as an actual deposed key.
-const NotDeposed = DeposedKey("")
-
-var deposedKeyRand = rand.New(rand.NewSource(time.Now().UnixNano()))
-
-// NewDeposedKey generates a pseudo-random deposed key. Because of the short
-// length of these keys, uniqueness is not a natural consequence and so the
-// caller should test to see if the generated key is already in use and generate
-// another if so, until a unique key is found.
-func NewDeposedKey() DeposedKey {
- v := deposedKeyRand.Uint32()
- return DeposedKey(fmt.Sprintf("%08x", v))
-}
-
-func (k DeposedKey) String() string {
- return string(k)
-}
-
-func (k DeposedKey) GoString() string {
- ks := string(k)
- switch {
- case ks == "":
- return "states.NotDeposed"
- default:
- return fmt.Sprintf("states.DeposedKey(%s)", ks)
- }
-}
-
-// Generation is a helper method to convert a DeposedKey into a Generation.
-// If the receiver is anything other than NotDeposed then the result is
-// just the same value as a Generation. If the receiver is NotDeposed then
-// the result is CurrentGen.
-func (k DeposedKey) Generation() Generation {
- if k == NotDeposed {
- return CurrentGen
- }
- return k
-}
-
-// generation is an implementation of Generation.
-func (k DeposedKey) generation() {}
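The round trip between DeposedKey and Generation is small enough to show inline (the key value shown is illustrative; real keys are random):

func deposedKeyExample() {
	key := NewDeposedKey()      // e.g. DeposedKey("3f2a9c04")
	gen := key.Generation()     // the same key, viewed as a Generation
	_ = gen
	_ = NotDeposed.Generation() // yields CurrentGen instead
}
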
diff --git a/vendor/github.com/hashicorp/terraform/states/state.go b/vendor/github.com/hashicorp/terraform/states/state.go
deleted file mode 100644
index 6d0cec14..00000000
--- a/vendor/github.com/hashicorp/terraform/states/state.go
+++ /dev/null
@@ -1,298 +0,0 @@
-package states
-
-import (
- "sort"
-
- "github.com/zclconf/go-cty/cty"
-
- "github.com/hashicorp/terraform/addrs"
- "github.com/hashicorp/terraform/internal/getproviders"
-)
-
-// State is the top-level type of a Terraform state.
-//
-// A state should be mutated only via its accessor methods, to ensure that
-// invariants are preserved.
-//
-// Access to State and the nested values within it is not concurrency-safe,
-// so when accessing a State object concurrently it is the caller's
-// responsibility to ensure that only one write is in progress at a time
-// and that reads only occur when no write is in progress. The most common
-// way to achieve this is to wrap the State in a SyncState and use the
-// higher-level atomic operations supported by that type.
-type State struct {
- // Modules contains the state for each module. The keys in this map are
- // an implementation detail and must not be used by outside callers.
- Modules map[string]*Module
-}
-
-// NewState constructs a minimal empty state, containing an empty root module.
-func NewState() *State {
- modules := map[string]*Module{}
- modules[addrs.RootModuleInstance.String()] = NewModule(addrs.RootModuleInstance)
- return &State{
- Modules: modules,
- }
-}
-
-// BuildState is a helper -- primarily intended for tests -- to build a state
-// using imperative code against the SyncState type while still acting as
-// an expression of type *State to assign into a containing struct.
-func BuildState(cb func(*SyncState)) *State {
- s := NewState()
- cb(s.SyncWrapper())
- return s
-}
-
-// Empty returns true if there are no resources or populated output values
-// in the receiver. In other words, if this state could be safely replaced
-// with the return value of NewState and be functionally equivalent.
-func (s *State) Empty() bool {
- if s == nil {
- return true
- }
- for _, ms := range s.Modules {
- if len(ms.Resources) != 0 {
- return false
- }
- if len(ms.OutputValues) != 0 {
- return false
- }
- }
- return true
-}
-
-// Module returns the state for the module with the given address, or nil if
-// the requested module is not tracked in the state.
-func (s *State) Module(addr addrs.ModuleInstance) *Module {
- if s == nil {
- panic("State.Module on nil *State")
- }
- return s.Modules[addr.String()]
-}
-
-// ModuleInstances returns the set of Module states that match the given path.
-func (s *State) ModuleInstances(addr addrs.Module) []*Module {
- var ms []*Module
- for _, m := range s.Modules {
- if m.Addr.Module().Equal(addr) {
- ms = append(ms, m)
- }
- }
- return ms
-}
-
-// ModuleOutputs returns all outputs for the given module call under the
-// parentAddr instance.
-func (s *State) ModuleOutputs(parentAddr addrs.ModuleInstance, module addrs.ModuleCall) []*OutputValue {
- var os []*OutputValue
- for _, m := range s.Modules {
- // can't get outputs from the root module
- if m.Addr.IsRoot() {
- continue
- }
-
- parent, call := m.Addr.Call()
- // make sure this is a descendant in the correct path
- if !parentAddr.Equal(parent) {
- continue
- }
-
- // and check if this is the correct child
- if call.Name != module.Name {
- continue
- }
-
- for _, o := range m.OutputValues {
- os = append(os, o)
- }
- }
-
- return os
-}
-
-// RemoveModule removes the module with the given address from the state,
-// unless it is the root module. The root module cannot be deleted, and so
-// this method will panic if that is attempted.
-//
-// Removing a module implicitly discards all of the resources, outputs and
-// local values within it, and so this should usually be done only for empty
-// modules. For callers accessing the state through a SyncState wrapper, modules
-// are automatically pruned if they are empty after one of their contained
-// elements is removed.
-func (s *State) RemoveModule(addr addrs.ModuleInstance) { - if addr.IsRoot() { - panic("attempted to remove root module") - } - - delete(s.Modules, addr.String()) -} - -// RootModule is a convenient alias for Module(addrs.RootModuleInstance). -func (s *State) RootModule() *Module { - if s == nil { - panic("RootModule called on nil State") - } - return s.Modules[addrs.RootModuleInstance.String()] -} - -// EnsureModule returns the state for the module with the given address, -// creating and adding a new one if necessary. -// -// Since this might modify the state to add a new instance, it is considered -// to be a write operation. -func (s *State) EnsureModule(addr addrs.ModuleInstance) *Module { - ms := s.Module(addr) - if ms == nil { - ms = NewModule(addr) - s.Modules[addr.String()] = ms - } - return ms -} - -// HasResources returns true if there is at least one resource (of any mode) -// present in the receiving state. -func (s *State) HasResources() bool { - if s == nil { - return false - } - for _, ms := range s.Modules { - if len(ms.Resources) > 0 { - return true - } - } - return false -} - -// Resource returns the state for the resource with the given address, or nil -// if no such resource is tracked in the state. -func (s *State) Resource(addr addrs.AbsResource) *Resource { - ms := s.Module(addr.Module) - if ms == nil { - return nil - } - return ms.Resource(addr.Resource) -} - -// Resources returns the set of resources that match the given configuration path. -func (s *State) Resources(addr addrs.ConfigResource) []*Resource { - var ret []*Resource - for _, m := range s.ModuleInstances(addr.Module) { - r := m.Resource(addr.Resource) - if r != nil { - ret = append(ret, r) - } - } - return ret -} - -// ResourceInstance returns the state for the resource instance with the given -// address, or nil if no such resource is tracked in the state. -func (s *State) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { - if s == nil { - panic("State.ResourceInstance on nil *State") - } - ms := s.Module(addr.Module) - if ms == nil { - return nil - } - return ms.ResourceInstance(addr.Resource) -} - -// OutputValue returns the state for the output value with the given address, -// or nil if no such output value is tracked in the state. -func (s *State) OutputValue(addr addrs.AbsOutputValue) *OutputValue { - ms := s.Module(addr.Module) - if ms == nil { - return nil - } - return ms.OutputValues[addr.OutputValue.Name] -} - -// LocalValue returns the value of the named local value with the given address, -// or cty.NilVal if no such value is tracked in the state. -func (s *State) LocalValue(addr addrs.AbsLocalValue) cty.Value { - ms := s.Module(addr.Module) - if ms == nil { - return cty.NilVal - } - return ms.LocalValues[addr.LocalValue.Name] -} - -// ProviderAddrs returns a list of all of the provider configuration addresses -// referenced throughout the receiving state. -// -// The result is de-duplicated so that each distinct address appears only once. -func (s *State) ProviderAddrs() []addrs.AbsProviderConfig { - if s == nil { - return nil - } - - m := map[string]addrs.AbsProviderConfig{} - for _, ms := range s.Modules { - for _, rc := range ms.Resources { - m[rc.ProviderConfig.String()] = rc.ProviderConfig - } - } - if len(m) == 0 { - return nil - } - - // This is mainly just so we'll get stable results for testing purposes. 
- keys := make([]string, 0, len(m))
- for k := range m {
- keys = append(keys, k)
- }
- sort.Strings(keys)
-
- ret := make([]addrs.AbsProviderConfig, len(keys))
- for i, key := range keys {
- ret[i] = m[key]
- }
-
- return ret
-}
-
-// ProviderRequirements returns a description of all of the providers that
-// are required to work with the receiving state.
-//
-// Because the state does not track specific version information for providers,
-// the requirements returned by this method will always be unconstrained.
-// The result should usually be merged with a Requirements derived from the
-// current configuration in order to apply some constraints.
-func (s *State) ProviderRequirements() getproviders.Requirements {
- configAddrs := s.ProviderAddrs()
- ret := make(getproviders.Requirements, len(configAddrs))
- for _, configAddr := range configAddrs {
- ret[configAddr.Provider] = nil // unconstrained dependency
- }
- return ret
-}
-
-// PruneResourceHusks is a specialized method that will remove any Resource
-// objects that do not contain any instances, even if they would normally be
-// retained because "count" or "for_each" is set.
-//
-// This should generally be used only after a "terraform destroy" operation,
-// to finalize the cleanup of the state. It is not correct to use this after
-// other operations because if a resource has "count = 0" or "for_each" over
-// an empty collection then we want to retain it in the state so that references
-// to it, particularly in "strange" contexts like "terraform console", can be
-// properly resolved.
-//
-// This method MUST NOT be called concurrently with other readers and writers
-// of the receiving state.
-func (s *State) PruneResourceHusks() {
- for _, m := range s.Modules {
- m.PruneResourceHusks()
- if len(m.Resources) == 0 && !m.Addr.IsRoot() {
- s.RemoveModule(m.Addr)
- }
- }
-}
-
-// SyncWrapper returns a SyncState object wrapping the receiver.
-func (s *State) SyncWrapper() *SyncState {
- return &SyncState{
- state: s,
- }
-}
diff --git a/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go b/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go
deleted file mode 100644
index f6a3919c..00000000
--- a/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package states
-
-import (
- "github.com/hashicorp/terraform/addrs"
- "github.com/zclconf/go-cty/cty"
-)
-
-// Taking deep copies of states is an important operation because state is
-// otherwise a mutable data structure that is challenging to share across
-// many separate callers. It is important that the DeepCopy implementations
-// in this file comprehensively copy all parts of the state data structure
-// that could be mutated via pointers.
-
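The practical consequence of these implementations is that a deep copy may be mutated without synchronizing against readers of the original. A minimal sketch, assuming the addrs package is imported as above:

func deepCopyExample(orig *State) {
	dup := orig.DeepCopy()
	// dup shares no mutable memory with orig, so this write is
	// invisible to any reader of orig.
	dup.EnsureModule(addrs.RootModuleInstance.Child("example", addrs.NoKey))
}
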
-// DeepCopy returns a new state that contains equivalent data to the receiver
-// but shares no backing memory in common.
-//
-// As with all methods on State, this method is not safe to use concurrently
-// with writing to any portion of the receiving data structure. It is the
-// caller's responsibility to ensure mutual exclusion for the duration of the
-// operation, but may then freely modify the receiver and the returned copy
-// independently once this method returns.
-func (s *State) DeepCopy() *State {
- if s == nil {
- return nil
- }
-
- modules := make(map[string]*Module, len(s.Modules))
- for k, m := range s.Modules {
- modules[k] = m.DeepCopy()
- }
- return &State{
- Modules: modules,
- }
-}
-
-// DeepCopy returns a new module state that contains equivalent data to the
-// receiver but shares no backing memory in common.
-//
-// As with all methods on Module, this method is not safe to use concurrently
-// with writing to any portion of the receiving data structure. It is the
-// caller's responsibility to ensure mutual exclusion for the duration of the
-// operation, but may then freely modify the receiver and the returned copy
-// independently once this method returns.
-func (ms *Module) DeepCopy() *Module {
- if ms == nil {
- return nil
- }
-
- resources := make(map[string]*Resource, len(ms.Resources))
- for k, r := range ms.Resources {
- resources[k] = r.DeepCopy()
- }
- outputValues := make(map[string]*OutputValue, len(ms.OutputValues))
- for k, v := range ms.OutputValues {
- outputValues[k] = v.DeepCopy()
- }
- localValues := make(map[string]cty.Value, len(ms.LocalValues))
- for k, v := range ms.LocalValues {
- // cty.Value is immutable, so we don't need to copy these.
- localValues[k] = v
- }
-
- return &Module{
- Addr: ms.Addr, // technically mutable, but immutable by convention
- Resources: resources,
- OutputValues: outputValues,
- LocalValues: localValues,
- }
-}
-
-// DeepCopy returns a new resource state that contains equivalent data to the
-// receiver but shares no backing memory in common.
-//
-// As with all methods on Resource, this method is not safe to use concurrently
-// with writing to any portion of the receiving data structure. It is the
-// caller's responsibility to ensure mutual exclusion for the duration of the
-// operation, but may then freely modify the receiver and the returned copy
-// independently once this method returns.
-func (rs *Resource) DeepCopy() *Resource {
- if rs == nil {
- return nil
- }
-
- instances := make(map[addrs.InstanceKey]*ResourceInstance, len(rs.Instances))
- for k, i := range rs.Instances {
- instances[k] = i.DeepCopy()
- }
-
- return &Resource{
- Addr: rs.Addr,
- Instances: instances,
- ProviderConfig: rs.ProviderConfig, // technically mutable, but immutable by convention
- }
-}
-
-// DeepCopy returns a new resource instance state that contains equivalent data
-// to the receiver but shares no backing memory in common.
-//
-// As with all methods on ResourceInstance, this method is not safe to use
-// concurrently with writing to any portion of the receiving data structure. It
-// is the caller's responsibility to ensure mutual exclusion for the duration
-// of the operation, but may then freely modify the receiver and the returned
-// copy independently once this method returns.
-func (is *ResourceInstance) DeepCopy() *ResourceInstance {
- if is == nil {
- return nil
- }
-
- deposed := make(map[DeposedKey]*ResourceInstanceObjectSrc, len(is.Deposed))
- for k, obj := range is.Deposed {
- deposed[k] = obj.DeepCopy()
- }
-
- return &ResourceInstance{
- Current: is.Current.DeepCopy(),
- Deposed: deposed,
- }
-}
-
-// DeepCopy returns a new resource instance object that contains equivalent data
-// to the receiver but shares no backing memory in common.
-//
-// As with all methods on ResourceInstanceObjectSrc, this method is not safe to
-// use concurrently with writing to any portion of the receiving data structure.
-// It is the caller's responsibility to ensure mutual exclusion for the duration
-// of the operation, but may then freely modify the receiver and the returned
-// copy independently once this method returns.
-func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc {
- if obj == nil {
- return nil
- }
-
- var attrsFlat map[string]string
- if obj.AttrsFlat != nil {
- attrsFlat = make(map[string]string, len(obj.AttrsFlat))
- for k, v := range obj.AttrsFlat {
- attrsFlat[k] = v
- }
- }
-
- var attrsJSON []byte
- if obj.AttrsJSON != nil {
- attrsJSON = make([]byte, len(obj.AttrsJSON))
- copy(attrsJSON, obj.AttrsJSON)
- }
-
- var private []byte
- if obj.Private != nil {
- private = make([]byte, len(obj.Private))
- copy(private, obj.Private)
- }
-
- // Some addrs.Referenceable implementations are technically mutable, but
- // we treat them as immutable by convention and so we don't deep-copy here.
- var dependencies []addrs.ConfigResource
- if obj.Dependencies != nil {
- dependencies = make([]addrs.ConfigResource, len(obj.Dependencies))
- copy(dependencies, obj.Dependencies)
- }
-
- return &ResourceInstanceObjectSrc{
- Status: obj.Status,
- SchemaVersion: obj.SchemaVersion,
- Private: private,
- AttrsFlat: attrsFlat,
- AttrsJSON: attrsJSON,
- Dependencies: dependencies,
- CreateBeforeDestroy: obj.CreateBeforeDestroy,
- }
-}
-
-// DeepCopy returns a new resource instance object that contains equivalent data
-// to the receiver but shares no backing memory in common.
-//
-// As with all methods on ResourceInstanceObject, this method is not safe to use
-// concurrently with writing to any portion of the receiving data structure. It
-// is the caller's responsibility to ensure mutual exclusion for the duration
-// of the operation, but may then freely modify the receiver and the returned
-// copy independently once this method returns.
-func (obj *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject {
- if obj == nil {
- return nil
- }
-
- var private []byte
- if obj.Private != nil {
- private = make([]byte, len(obj.Private))
- copy(private, obj.Private)
- }
-
- // Some addrs.Referenceable implementations are technically mutable, but
- // we treat them as immutable by convention and so we don't deep-copy here.
- var dependencies []addrs.ConfigResource
- if obj.Dependencies != nil {
- dependencies = make([]addrs.ConfigResource, len(obj.Dependencies))
- copy(dependencies, obj.Dependencies)
- }
-
- return &ResourceInstanceObject{
- Value: obj.Value,
- Status: obj.Status,
- Private: private,
- Dependencies: dependencies,
- }
-}
-
-// DeepCopy returns a new output value state that contains equivalent data
-// to the receiver but shares no backing memory in common.
-//
-// As with all methods on OutputValue, this method is not safe to use
-// concurrently with writing to any portion of the receiving data structure. It
-// is the caller's responsibility to ensure mutual exclusion for the duration
-// of the operation, but may then freely modify the receiver and the returned
-// copy independently once this method returns.
-func (os *OutputValue) DeepCopy() *OutputValue { - if os == nil { - return nil - } - - return &OutputValue{ - Addr: os.Addr, - Value: os.Value, - Sensitive: os.Sensitive, - } -} diff --git a/vendor/github.com/hashicorp/terraform/states/state_equal.go b/vendor/github.com/hashicorp/terraform/states/state_equal.go deleted file mode 100644 index ea20967e..00000000 --- a/vendor/github.com/hashicorp/terraform/states/state_equal.go +++ /dev/null @@ -1,18 +0,0 @@ -package states - -import ( - "reflect" -) - -// Equal returns true if the receiver is functionally equivalent to other, -// including any ephemeral portions of the state that would not be included -// if the state were saved to files. -// -// To test only the persistent portions of two states for equality, instead -// use statefile.StatesMarshalEqual. -func (s *State) Equal(other *State) bool { - // For the moment this is sufficient, but we may need to do something - // more elaborate in future if we have any portions of state that require - // more sophisticated comparisons. - return reflect.DeepEqual(s, other) -} diff --git a/vendor/github.com/hashicorp/terraform/states/state_string.go b/vendor/github.com/hashicorp/terraform/states/state_string.go deleted file mode 100644 index 680acf7a..00000000 --- a/vendor/github.com/hashicorp/terraform/states/state_string.go +++ /dev/null @@ -1,277 +0,0 @@ -package states - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "sort" - "strings" - - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/hcl2shim" -) - -// String returns a rather-odd string representation of the entire state. -// -// This is intended to match the behavior of the older terraform.State.String -// method that is used in lots of existing tests. It should not be used in -// new tests: instead, use "cmp" to directly compare the state data structures -// and print out a diff if they do not match. -// -// This method should never be used in non-test code, whether directly by call -// or indirectly via a %s or %q verb in package fmt. -func (s *State) String() string { - if s == nil { - return "" - } - - // sort the modules by name for consistent output - modules := make([]string, 0, len(s.Modules)) - for m := range s.Modules { - modules = append(modules, m) - } - sort.Strings(modules) - - var buf bytes.Buffer - for _, name := range modules { - m := s.Modules[name] - mStr := m.testString() - - // If we're the root module, we just write the output directly. - if m.Addr.IsRoot() { - buf.WriteString(mStr + "\n") - continue - } - - // We need to build out a string that resembles the not-quite-standard - // format that terraform.State.String used to use, where there's a - // "module." prefix but then just a chain of all of the module names - // without any further "module." portions. - buf.WriteString("module") - for _, step := range m.Addr { - buf.WriteByte('.') - buf.WriteString(step.Name) - if step.InstanceKey != addrs.NoKey { - buf.WriteString(step.InstanceKey.String()) - } - } - buf.WriteString(":\n") - - s := bufio.NewScanner(strings.NewReader(mStr)) - for s.Scan() { - text := s.Text() - if text != "" { - text = " " + text - } - - buf.WriteString(fmt.Sprintf("%s\n", text)) - } - } - - return strings.TrimSpace(buf.String()) -} - -// testString is used to produce part of the output of State.String. It should -// never be used directly. 
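-//
-// For new tests, the String doc comment above recommends "cmp" instead; a
-// minimal sketch of that style (assuming the github.com/google/go-cmp
-// module, plus a hypothetical cmp.Option named ctyComparer for comparing
-// cty.Value fields) would be:
-//
-//	if diff := cmp.Diff(want, got, ctyComparer); diff != "" {
-//		t.Errorf("wrong state (-want +got):\n%s", diff)
-//	}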
-func (m *Module) testString() string { - var buf bytes.Buffer - - if len(m.Resources) == 0 { - buf.WriteString("") - } - - // We use AbsResourceInstance here, even though everything belongs to - // the same module, just because we have a sorting behavior defined - // for those but not for just ResourceInstance. - addrsOrder := make([]addrs.AbsResourceInstance, 0, len(m.Resources)) - for _, rs := range m.Resources { - for ik := range rs.Instances { - addrsOrder = append(addrsOrder, rs.Addr.Instance(ik)) - } - } - - sort.Slice(addrsOrder, func(i, j int) bool { - return addrsOrder[i].Less(addrsOrder[j]) - }) - - for _, fakeAbsAddr := range addrsOrder { - addr := fakeAbsAddr.Resource - rs := m.Resource(addr.ContainingResource()) - is := m.ResourceInstance(addr) - - // Here we need to fake up a legacy-style address as the old state - // types would've used, since that's what our tests against those - // old types expect. The significant difference is that instancekey - // is dot-separated rather than using index brackets. - k := addr.ContainingResource().String() - if addr.Key != addrs.NoKey { - switch tk := addr.Key.(type) { - case addrs.IntKey: - k = fmt.Sprintf("%s.%d", k, tk) - default: - // No other key types existed for the legacy types, so we - // can do whatever we want here. We'll just use our standard - // syntax for these. - k = k + tk.String() - } - } - - id := LegacyInstanceObjectID(is.Current) - - taintStr := "" - if is.Current != nil && is.Current.Status == ObjectTainted { - taintStr = " (tainted)" - } - - deposedStr := "" - if len(is.Deposed) > 0 { - deposedStr = fmt.Sprintf(" (%d deposed)", len(is.Deposed)) - } - - buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) - buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) - buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.ProviderConfig.String())) - - // Attributes were a flatmap before, but are not anymore. To preserve - // our old output as closely as possible we need to do a conversion - // to flatmap. Normally we'd want to do this with schema for - // accuracy, but for our purposes here it only needs to be approximate. - // This should produce an identical result for most cases, though - // in particular will differ in a few cases: - // - The keys used for elements in a set will be different - // - Values for attributes of type cty.DynamicPseudoType will be - // misinterpreted (but these weren't possible in old world anyway) - var attributes map[string]string - if obj := is.Current; obj != nil { - switch { - case obj.AttrsFlat != nil: - // Easy (but increasingly unlikely) case: the state hasn't - // actually been upgraded to the new form yet. - attributes = obj.AttrsFlat - case obj.AttrsJSON != nil: - ty, err := ctyjson.ImpliedType(obj.AttrsJSON) - if err == nil { - val, err := ctyjson.Unmarshal(obj.AttrsJSON, ty) - if err == nil { - attributes = hcl2shim.FlatmapValueFromHCL2(val) - } - } - } - } - attrKeys := make([]string, 0, len(attributes)) - for ak, val := range attributes { - if ak == "id" { - continue - } - - // don't show empty containers in the output - if val == "0" && (strings.HasSuffix(ak, ".#") || strings.HasSuffix(ak, ".%")) { - continue - } - - attrKeys = append(attrKeys, ak) - } - - sort.Strings(attrKeys) - - for _, ak := range attrKeys { - av := attributes[ak] - buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) - } - - // CAUTION: Since deposed keys are now random strings instead of - // incrementing integers, this result will not be deterministic - // if there is more than one deposed object. 
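-		// A deterministic variant would sort the deposed keys before
-		// iterating (a sketch only, not applied here, so that the legacy
-		// output format stays exactly as the old tests expect):
-		//
-		//	keys := make([]string, 0, len(is.Deposed))
-		//	for k := range is.Deposed {
-		//		keys = append(keys, string(k))
-		//	}
-		//	sort.Strings(keys)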
- i := 1 - for _, t := range is.Deposed { - id := LegacyInstanceObjectID(t) - taintStr := "" - if t.Status == ObjectTainted { - taintStr = " (tainted)" - } - buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", i, id, taintStr)) - i++ - } - - if obj := is.Current; obj != nil && len(obj.Dependencies) > 0 { - buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) - for _, dep := range obj.Dependencies { - buf.WriteString(fmt.Sprintf(" %s\n", dep.String())) - } - } - } - - if len(m.OutputValues) > 0 { - buf.WriteString("\nOutputs:\n\n") - - ks := make([]string, 0, len(m.OutputValues)) - for k := range m.OutputValues { - ks = append(ks, k) - } - sort.Strings(ks) - - for _, k := range ks { - v := m.OutputValues[k] - lv := hcl2shim.ConfigValueFromHCL2(v.Value) - switch vTyped := lv.(type) { - case string: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case []interface{}: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case map[string]interface{}: - var mapKeys []string - for key := range vTyped { - mapKeys = append(mapKeys, key) - } - sort.Strings(mapKeys) - - var mapBuf bytes.Buffer - mapBuf.WriteString("{") - for _, key := range mapKeys { - mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) - } - mapBuf.WriteString("}") - - buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) - default: - buf.WriteString(fmt.Sprintf("%s = %#v\n", k, lv)) - } - } - } - - return buf.String() -} - -// LegacyInstanceObjectID is a helper for extracting an object id value from -// an instance object in a way that approximates how we used to do this -// for the old state types. ID is no longer first-class, so this is preserved -// only for compatibility with old tests that include the id as part of their -// expected value. -func LegacyInstanceObjectID(obj *ResourceInstanceObjectSrc) string { - if obj == nil { - return "" - } - - if obj.AttrsJSON != nil { - type WithID struct { - ID string `json:"id"` - } - var withID WithID - err := json.Unmarshal(obj.AttrsJSON, &withID) - if err == nil { - return withID.ID - } - } else if obj.AttrsFlat != nil { - if flatID, exists := obj.AttrsFlat["id"]; exists { - return flatID - } - } - - // For resource types created after we removed id as special there may - // not actually be one at all. This is okay because older tests won't - // encounter this, and new tests shouldn't be using ids. - return "" -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go b/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go deleted file mode 100644 index a6d88ecd..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go +++ /dev/null @@ -1,62 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform/tfdiags" -) - -const invalidFormat = "Invalid state file format" - -// jsonUnmarshalDiags is a helper that translates errors returned from -// json.Unmarshal into hopefully-more-helpful diagnostics messages. -func jsonUnmarshalDiags(err error) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - if err == nil { - return diags - } - - switch tErr := err.(type) { - case *json.SyntaxError: - // We've usually already successfully parsed a source file as JSON at - // least once before we'd use jsonUnmarshalDiags with it (to sniff - // the version number) so this particular error should not appear much - // in practice. 
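-		// For illustration (hypothetical input): unmarshalling the
-		// malformed document `{"version": 4,}` produces a *json.SyntaxError
-		// whose Offset marks the byte where the stray '}' was found, and
-		// that offset is what the message below reports:
-		//
-		//	var v struct{ Version int }
-		//	err := json.Unmarshal([]byte(`{"version": 4,}`), &v)
-		//	// err.(*json.SyntaxError).Offset reports the position of '}'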
- diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), - )) - case *json.UnmarshalTypeError: - // This is likely to be the most common area, describing a - // non-conformance between the file and the expected file format - // at a semantic level. - if tErr.Field != "" { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - fmt.Sprintf("The state file field %q has invalid value %s", tErr.Field, tErr.Value), - )) - break - } else { - // Without a field name, we can't really say anything helpful. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - "The state file does not conform to the expected JSON data structure.", - )) - } - default: - // Fallback for all other types of errors. This can happen only for - // custom UnmarshalJSON implementations, so should be encountered - // only rarely. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - fmt.Sprintf("The state file does not conform to the expected JSON data structure: %s.", err.Error()), - )) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/doc.go b/vendor/github.com/hashicorp/terraform/states/statefile/doc.go deleted file mode 100644 index 625d0cf4..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package statefile deals with the file format used to serialize states for -// persistent storage and then deserialize them into memory again later. -package statefile diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/file.go b/vendor/github.com/hashicorp/terraform/states/statefile/file.go deleted file mode 100644 index 6e202401..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/file.go +++ /dev/null @@ -1,62 +0,0 @@ -package statefile - -import ( - version "github.com/hashicorp/go-version" - - "github.com/hashicorp/terraform/states" - tfversion "github.com/hashicorp/terraform/version" -) - -// File is the in-memory representation of a state file. It includes the state -// itself along with various metadata used to track changing state files for -// the same configuration over time. -type File struct { - // TerraformVersion is the version of Terraform that wrote this state file. - TerraformVersion *version.Version - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial uint64 - - // Lineage is set when a new, blank state file is created and then - // never updated. This allows us to determine whether the serials - // of two states can be meaningfully compared. - // Apart from the guarantee that collisions between two lineages - // are very unlikely, this value is opaque and external callers - // should only compare lineage strings byte-for-byte for equality. - Lineage string - - // State is the actual state represented by this file. - State *states.State -} - -func New(state *states.State, lineage string, serial uint64) *File { - // To make life easier on callers, we'll accept a nil state here and just - // allocate an empty one, which is required for this file to be successfully - // written out. 
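-	//
-	// A minimal usage sketch (hypothetical lineage and serial values):
-	//
-	//	f := New(nil, "hypothetical-lineage-id", 0)
-	//	// f.State is an empty states.State rather than nil, and
-	//	// f.TerraformVersion is pre-populated with tfversion.SemVer.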
-	if state == nil {
-		state = states.NewState()
-	}
-
-	return &File{
-		TerraformVersion: tfversion.SemVer,
-		State:            state,
-		Lineage:          lineage,
-		Serial:           serial,
-	}
-}
-
-// DeepCopy is a convenience method to create a new File object whose state
-// is a deep copy of the receiver's, as implemented by states.State.DeepCopy.
-func (f *File) DeepCopy() *File {
-	if f == nil {
-		return nil
-	}
-	return &File{
-		TerraformVersion: f.TerraformVersion,
-		Serial:           f.Serial,
-		Lineage:          f.Lineage,
-		State:            f.State.DeepCopy(),
-	}
-}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go b/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go
deleted file mode 100644
index 4948b39b..00000000
--- a/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package statefile
-
-import (
-	"bytes"
-
-	"github.com/hashicorp/terraform/states"
-)
-
-// StatesMarshalEqual returns true if and only if the two given states have
-// an identical (byte-for-byte) statefile representation.
-//
-// This function compares only the portions of the state that are persisted
-// in state files, so for example it will not return false if the only
-// differences between the two states are local values or descendant module
-// outputs.
-func StatesMarshalEqual(a, b *states.State) bool {
-	var aBuf bytes.Buffer
-	var bBuf bytes.Buffer
-
-	// nil states are not valid states, and so they can never marshal equal.
-	if a == nil || b == nil {
-		return false
-	}
-
-	// We write here some temporary file contents that have no header
-	// information populated, thus ensuring that we're only comparing the
-	// state itself and not any metadata.
-	err := Write(&File{State: a}, &aBuf)
-	if err != nil {
-		// Should never happen, because we're writing to an in-memory buffer
-		panic(err)
-	}
-	err = Write(&File{State: b}, &bBuf)
-	if err != nil {
-		// Should never happen, because we're writing to an in-memory buffer
-		panic(err)
-	}
-
-	return bytes.Equal(aBuf.Bytes(), bBuf.Bytes())
-}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/read.go b/vendor/github.com/hashicorp/terraform/states/statefile/read.go
deleted file mode 100644
index d691c029..00000000
--- a/vendor/github.com/hashicorp/terraform/states/statefile/read.go
+++ /dev/null
@@ -1,209 +0,0 @@
-package statefile
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-
-	version "github.com/hashicorp/go-version"
-
-	"github.com/hashicorp/terraform/tfdiags"
-	tfversion "github.com/hashicorp/terraform/version"
-)
-
-// ErrNoState is returned by Read when the state file is empty.
-var ErrNoState = errors.New("no state")
-
-// Read reads a state from the given reader.
-//
-// Legacy state format versions 1 through 3 are supported, but the result will
-// contain object attributes in the deprecated "flatmap" format and so must
-// be upgraded by the caller before use.
-//
-// If the state file is empty, the special error value ErrNoState is returned.
-// Otherwise, the returned error might be a wrapper around tfdiags.Diagnostics
-// potentially describing multiple errors.
-func Read(r io.Reader) (*File, error) {
-	// Some callers provide us a "typed nil" *os.File here, which would
-	// cause us to panic below if we tried to use it.
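-	// The classic typed-nil pitfall, for illustration: storing a nil
-	// *os.File into an io.Reader makes the interface value itself non-nil,
-	// so a plain r == nil check would not catch it:
-	//
-	//	var f *os.File      // nil pointer
-	//	var r io.Reader = f // r != nil here, yet r.(*os.File) is nil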
-	if f, ok := r.(*os.File); ok && f == nil {
-		return nil, ErrNoState
-	}
-
-	var diags tfdiags.Diagnostics
-
-	// We actually just buffer the whole thing in memory, because states are
-	// generally not huge and we need to be able to sniff for a version
-	// number before full parsing.
-	src, err := ioutil.ReadAll(r)
-	if err != nil {
-		diags = diags.Append(tfdiags.Sourceless(
-			tfdiags.Error,
-			"Failed to read state file",
-			fmt.Sprintf("The state file could not be read: %s", err),
-		))
-		return nil, diags.Err()
-	}
-
-	if len(src) == 0 {
-		return nil, ErrNoState
-	}
-
-	state, diags := readState(src)
-	if diags.HasErrors() {
-		return nil, diags.Err()
-	}
-
-	if state == nil {
-		// Should never happen
-		panic("readState returned nil state with no errors")
-	}
-
-	if state.TerraformVersion != nil && state.TerraformVersion.GreaterThan(tfversion.SemVer) {
-		return state, fmt.Errorf(
-			"state snapshot was created by Terraform v%s, which is newer than current v%s; upgrade to Terraform v%s or greater to work with this state",
-			state.TerraformVersion,
-			tfversion.SemVer,
-			state.TerraformVersion,
-		)
-	}
-
-	return state, diags.Err()
-}
-
-func readState(src []byte) (*File, tfdiags.Diagnostics) {
-	var diags tfdiags.Diagnostics
-
-	if looksLikeVersion0(src) {
-		diags = diags.Append(tfdiags.Sourceless(
-			tfdiags.Error,
-			unsupportedFormat,
-			"The state is stored in a legacy binary format that is not supported since Terraform v0.7. To continue, first upgrade the state using Terraform 0.6.16 or earlier.",
-		))
-		return nil, diags
-	}
-
-	version, versionDiags := sniffJSONStateVersion(src)
-	diags = diags.Append(versionDiags)
-	if versionDiags.HasErrors() {
-		return nil, diags
-	}
-
-	switch version {
-	case 0:
-		diags = diags.Append(tfdiags.Sourceless(
-			tfdiags.Error,
-			unsupportedFormat,
-			"The state file uses JSON syntax but has a version number of zero. There was never a JSON-based state format zero, so this state file is invalid and cannot be processed.",
-		))
-		return nil, diags
-	case 1:
-		return readStateV1(src)
-	case 2:
-		return readStateV2(src)
-	case 3:
-		return readStateV3(src)
-	case 4:
-		return readStateV4(src)
-	default:
-		thisVersion := tfversion.SemVer.String()
-		creatingVersion := sniffJSONStateTerraformVersion(src)
-		switch {
-		case creatingVersion != "":
-			diags = diags.Append(tfdiags.Sourceless(
-				tfdiags.Error,
-				unsupportedFormat,
-				fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file was created by Terraform %s.", version, thisVersion, creatingVersion),
-			))
-		default:
-			diags = diags.Append(tfdiags.Sourceless(
-				tfdiags.Error,
-				unsupportedFormat,
-				fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s.
This state file may have been created by a newer version of Terraform.", version, thisVersion), - )) - } - return nil, diags - } -} - -func sniffJSONStateVersion(src []byte) (uint64, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - type VersionSniff struct { - Version *uint64 `json:"version"` - } - var sniff VersionSniff - err := json.Unmarshal(src, &sniff) - if err != nil { - switch tErr := err.(type) { - case *json.SyntaxError: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), - )) - case *json.UnmarshalTypeError: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - fmt.Sprintf("The version in the state file is %s. A positive whole number is required.", tErr.Value), - )) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - "The state file could not be parsed as JSON.", - )) - } - } - - if sniff.Version == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - "The state file does not have a \"version\" attribute, which is required to identify the format version.", - )) - return 0, diags - } - - return *sniff.Version, diags -} - -// sniffJSONStateTerraformVersion attempts to sniff the Terraform version -// specification from the given state file source code. The result is either -// a version string or an empty string if no version number could be extracted. -// -// This is a best-effort function intended to produce nicer error messages. It -// should not be used for any real processing. -func sniffJSONStateTerraformVersion(src []byte) string { - type VersionSniff struct { - Version string `json:"terraform_version"` - } - var sniff VersionSniff - - err := json.Unmarshal(src, &sniff) - if err != nil { - return "" - } - - // Attempt to parse the string as a version so we won't report garbage - // as a version number. - _, err = version.NewVersion(sniff.Version) - if err != nil { - return "" - } - - return sniff.Version -} - -// unsupportedFormat is a diagnostic summary message for when the state file -// seems to not be a state file at all, or is not a supported version. -// -// Use invalidFormat instead for the subtly-different case of "this looks like -// it's intended to be a state file but it's not structured correctly". -const unsupportedFormat = "Unsupported state file format" - -const upgradeFailed = "State format upgrade failed" diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version0.go b/vendor/github.com/hashicorp/terraform/states/statefile/version0.go deleted file mode 100644 index 9b533317..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/version0.go +++ /dev/null @@ -1,23 +0,0 @@ -package statefile - -// looksLikeVersion0 sniffs for the signature indicating a version 0 state -// file. -// -// Version 0 was the number retroactively assigned to Terraform's initial -// (unversioned) binary state file format, which was later superseded by the -// version 1 format in JSON. -// -// Version 0 is no longer supported, so this is used only to detect it and -// return a nice error to the user. -func looksLikeVersion0(src []byte) bool { - // Version 0 files begin with the magic prefix "tfstate". 
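-	//
-	// An equivalent one-line test (a sketch; it trades the explicit length
-	// check below for an extra "bytes" import):
-	//
-	//	return bytes.HasPrefix(src, []byte("tfstate"))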
-	const magic = "tfstate"
-	if len(src) < len(magic) {
-		// Not even long enough to have the magic prefix
-		return false
-	}
-	if string(src[0:len(magic)]) == magic {
-		return true
-	}
-	return false
-}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version1.go b/vendor/github.com/hashicorp/terraform/states/statefile/version1.go
deleted file mode 100644
index 80d711bc..00000000
--- a/vendor/github.com/hashicorp/terraform/states/statefile/version1.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package statefile
-
-import (
-	"encoding/json"
-	"fmt"
-
-	"github.com/hashicorp/terraform/tfdiags"
-)
-
-func readStateV1(src []byte) (*File, tfdiags.Diagnostics) {
-	var diags tfdiags.Diagnostics
-	sV1 := &stateV1{}
-	err := json.Unmarshal(src, sV1)
-	if err != nil {
-		diags = diags.Append(jsonUnmarshalDiags(err))
-		return nil, diags
-	}
-
-	file, prepDiags := prepareStateV1(sV1)
-	diags = diags.Append(prepDiags)
-	return file, diags
-}
-
-func prepareStateV1(sV1 *stateV1) (*File, tfdiags.Diagnostics) {
-	var diags tfdiags.Diagnostics
-	sV2, err := upgradeStateV1ToV2(sV1)
-	if err != nil {
-		diags = diags.Append(tfdiags.Sourceless(
-			tfdiags.Error,
-			upgradeFailed,
-			fmt.Sprintf("Error upgrading state file format from version 1 to version 2: %s.", err),
-		))
-		return nil, diags
-	}
-
-	file, prepDiags := prepareStateV2(sV2)
-	diags = diags.Append(prepDiags)
-	return file, diags
-}
-
-// stateV1 is a representation of the legacy JSON state format version 1.
-//
-// It is only used to read version 1 JSON files prior to upgrading them to
-// the current format.
-type stateV1 struct {
-	// Version is the protocol version. "1" for a StateV1.
-	Version int `json:"version"`
-
-	// Serial is incremented on any operation that modifies
-	// the State file. It is used to detect potentially conflicting
-	// updates.
-	Serial int64 `json:"serial"`
-
-	// Remote is used to track the metadata required to
-	// pull and push state files from a remote storage endpoint.
-	Remote *remoteStateV1 `json:"remote,omitempty"`
-
-	// Modules contains all the modules in a breadth-first order
-	Modules []*moduleStateV1 `json:"modules"`
-}
-
-type remoteStateV1 struct {
-	// Type controls the client we use for the remote state
-	Type string `json:"type"`
-
-	// Config is used to store arbitrary configuration that
-	// is type specific
-	Config map[string]string `json:"config"`
-}
-
-type moduleStateV1 struct {
-	// Path is the import path from the root module. Module imports are
-	// always disjoint, so the path represents a module tree.
-	Path []string `json:"path"`
-
-	// Outputs declared by the module and maintained for each module
-	// even though only the root module technically needs to be kept.
-	// This allows operators to inspect values at the boundaries.
-	Outputs map[string]string `json:"outputs"`
-
-	// Resources is a mapping of the logically named resource to
-	// the state of the resource. Each resource may actually have
-	// N instances underneath, although a user only needs to think
-	// about the 1:1 case.
-	Resources map[string]*resourceStateV1 `json:"resources"`
-
-	// Dependencies are a list of things that this module relies on
-	// existing to remain intact. For example: a module may depend
-	// on a VPC ID given by an aws_vpc resource.
-	//
-	// Terraform uses this information to build valid destruction
-	// orders and to warn the user if they're destroying a module that
-	// another resource depends on.
-	//
-	// Things can be put into this list that may not be managed by
-	// Terraform.
If Terraform doesn't find a matching ID in the
-	// overall state, then it assumes it isn't managed and doesn't
-	// worry about it.
-	Dependencies []string `json:"depends_on,omitempty"`
-}
-
-type resourceStateV1 struct {
-	// This is filled in and managed by Terraform, and is the resource
-	// type itself such as "mycloud_instance". If a resource provider sets
-	// this value, it won't be persisted.
-	Type string `json:"type"`
-
-	// Dependencies are a list of things that this resource relies on
-	// existing to remain intact. For example: an AWS instance might
-	// depend on a subnet (which itself might depend on a VPC, and so
-	// on).
-	//
-	// Terraform uses this information to build valid destruction
-	// orders and to warn the user if they're destroying a resource that
-	// another resource depends on.
-	//
-	// Things can be put into this list that may not be managed by
-	// Terraform. If Terraform doesn't find a matching ID in the
-	// overall state, then it assumes it isn't managed and doesn't
-	// worry about it.
-	Dependencies []string `json:"depends_on,omitempty"`
-
-	// Primary is the current active instance for this resource.
-	// It can be replaced but only after a successful creation.
-	// These are the instances on which providers will act.
-	Primary *instanceStateV1 `json:"primary"`
-
-	// Tainted is used to track any underlying instances that
-	// have been created but are in a bad or unknown state and
-	// need to be cleaned up subsequently. In the
-	// standard case, there is at most a single instance.
-	// However, in pathological cases, it is possible for the number
-	// of instances to accumulate.
-	Tainted []*instanceStateV1 `json:"tainted,omitempty"`
-
-	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
-	// Primary is Deposed to get it out of the way for the replacement Primary to
-	// be created by Apply. If the replacement Primary creates successfully, the
-	// Deposed instance is cleaned up. If there were problems creating the
-	// replacement, the instance remains in the Deposed list so it can be
-	// destroyed in a future run. Functionally, Deposed instances are very
-	// similar to Tainted instances in that Terraform is only tracking them in
-	// order to remember to destroy them.
-	Deposed []*instanceStateV1 `json:"deposed,omitempty"`
-
-	// Provider is used when a resource is connected to a provider with an alias.
-	// If this string is empty, the resource is connected to the default provider,
-	// e.g. "aws_instance" goes with the "aws" provider.
-	// If the resource block contained a "provider" key, that value will be set here.
-	Provider string `json:"provider,omitempty"`
-}
-
-type instanceStateV1 struct {
-	// A unique ID for this resource. This is opaque to Terraform
-	// and is only meant as a lookup mechanism for the providers.
-	ID string `json:"id"`
-
-	// Attributes are basic information about the resource. Any keys here
-	// are accessible in variable format within Terraform configurations:
-	// ${resourcetype.name.attribute}.
-	Attributes map[string]string `json:"attributes,omitempty"`
-
-	// Meta is a simple K/V map that is persisted to the State but otherwise
-	// ignored by Terraform core. It's meant to be used for accounting by
-	// external client code.
-	Meta map[string]string `json:"meta,omitempty"`
-}
-
-type ephemeralStateV1 struct {
-	// ConnInfo is used for the providers to export information which is
-	// used to connect to the resource for provisioning.
For example, - // this could contain SSH or WinRM credentials. - ConnInfo map[string]string `json:"-"` -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go deleted file mode 100644 index 0b417e1c..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go +++ /dev/null @@ -1,172 +0,0 @@ -package statefile - -import ( - "fmt" - "log" - - "github.com/mitchellh/copystructure" -) - -// upgradeStateV1ToV2 is used to upgrade a V1 state representation -// into a V2 state representation -func upgradeStateV1ToV2(old *stateV1) (*stateV2, error) { - log.Printf("[TRACE] statefile.Read: upgrading format from v1 to v2") - if old == nil { - return nil, nil - } - - remote, err := old.Remote.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - - modules := make([]*moduleStateV2, len(old.Modules)) - for i, module := range old.Modules { - upgraded, err := module.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - modules[i] = upgraded - } - if len(modules) == 0 { - modules = nil - } - - newState := &stateV2{ - Version: 2, - Serial: old.Serial, - Remote: remote, - Modules: modules, - } - - return newState, nil -} - -func (old *remoteStateV1) upgradeToV2() (*remoteStateV2, error) { - if old == nil { - return nil, nil - } - - config, err := copystructure.Copy(old.Config) - if err != nil { - return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err) - } - - return &remoteStateV2{ - Type: old.Type, - Config: config.(map[string]string), - }, nil -} - -func (old *moduleStateV1) upgradeToV2() (*moduleStateV2, error) { - if old == nil { - return nil, nil - } - - pathRaw, err := copystructure.Copy(old.Path) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - path, ok := pathRaw.([]string) - if !ok { - return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") - } - if len(path) == 0 { - // We found some V1 states with a nil path. Assume root. 
- path = []string{"root"} - } - - // Outputs needs upgrading to use the new structure - outputs := make(map[string]*outputStateV2) - for key, output := range old.Outputs { - outputs[key] = &outputStateV2{ - Type: "string", - Value: output, - Sensitive: false, - } - } - - resources := make(map[string]*resourceStateV2) - for key, oldResource := range old.Resources { - upgraded, err := oldResource.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - resources[key] = upgraded - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - - return &moduleStateV2{ - Path: path, - Outputs: outputs, - Resources: resources, - Dependencies: dependencies.([]string), - }, nil -} - -func (old *resourceStateV1) upgradeToV2() (*resourceStateV2, error) { - if old == nil { - return nil, nil - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - primary, err := old.Primary.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - deposed := make([]*instanceStateV2, len(old.Deposed)) - for i, v := range old.Deposed { - upgraded, err := v.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - deposed[i] = upgraded - } - if len(deposed) == 0 { - deposed = nil - } - - return &resourceStateV2{ - Type: old.Type, - Dependencies: dependencies.([]string), - Primary: primary, - Deposed: deposed, - Provider: old.Provider, - }, nil -} - -func (old *instanceStateV1) upgradeToV2() (*instanceStateV2, error) { - if old == nil { - return nil, nil - } - - attributes, err := copystructure.Copy(old.Attributes) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - meta, err := copystructure.Copy(old.Meta) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - newMeta := make(map[string]interface{}) - for k, v := range meta.(map[string]string) { - newMeta[k] = v - } - - return &instanceStateV2{ - ID: old.ID, - Attributes: attributes.(map[string]string), - Meta: newMeta, - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version2.go b/vendor/github.com/hashicorp/terraform/states/statefile/version2.go deleted file mode 100644 index be93924a..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/version2.go +++ /dev/null @@ -1,209 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - "sync" - - "github.com/hashicorp/terraform/tfdiags" -) - -func readStateV2(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV2 := &stateV2{} - err := json.Unmarshal(src, sV2) - if err != nil { - diags = diags.Append(jsonUnmarshalDiags(err)) - return nil, diags - } - - file, prepDiags := prepareStateV2(sV2) - diags = diags.Append(prepDiags) - return file, diags -} - -func prepareStateV2(sV2 *stateV2) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV3, err := upgradeStateV2ToV3(sV2) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - upgradeFailed, - fmt.Sprintf("Error upgrading state file format from version 2 to version 3: %s.", err), - )) - return nil, diags - } - - file, prepDiags := prepareStateV3(sV3) - diags = diags.Append(prepDiags) - return file, diags -} - -// stateV2 is a 
representation of the legacy JSON state format version 2.
-//
-// It is only used to read version 2 JSON files prior to upgrading them to
-// the current format.
-type stateV2 struct {
-	// Version is the state file protocol version.
-	Version int `json:"version"`
-
-	// TFVersion is the version of Terraform that wrote this state.
-	TFVersion string `json:"terraform_version,omitempty"`
-
-	// Serial is incremented on any operation that modifies
-	// the State file. It is used to detect potentially conflicting
-	// updates.
-	Serial int64 `json:"serial"`
-
-	// Lineage is set when a new, blank state is created and then
-	// never updated. This allows us to determine whether the serials
-	// of two states can be meaningfully compared.
-	// Apart from the guarantee that collisions between two lineages
-	// are very unlikely, this value is opaque and external callers
-	// should only compare lineage strings byte-for-byte for equality.
-	Lineage string `json:"lineage"`
-
-	// Remote is used to track the metadata required to
-	// pull and push state files from a remote storage endpoint.
-	Remote *remoteStateV2 `json:"remote,omitempty"`
-
-	// Backend tracks the configuration for the backend in use with
-	// this state. This is used to track any changes in the backend
-	// configuration.
-	Backend *backendStateV2 `json:"backend,omitempty"`
-
-	// Modules contains all the modules in a breadth-first order
-	Modules []*moduleStateV2 `json:"modules"`
-}
-
-type remoteStateV2 struct {
-	// Type controls the client we use for the remote state
-	Type string `json:"type"`
-
-	// Config is used to store arbitrary configuration that
-	// is type specific
-	Config map[string]string `json:"config"`
-}
-
-type outputStateV2 struct {
-	// Sensitive describes whether the output is considered sensitive,
-	// which may lead to masking the value on screen in some cases.
-	Sensitive bool `json:"sensitive"`
-	// Type describes the structure of Value. Valid values are "string",
-	// "map" and "list"
-	Type string `json:"type"`
-	// Value contains the value of the output, in the structure described
-	// by the Type field.
-	Value interface{} `json:"value"`
-
-	mu sync.Mutex
-}
-
-type moduleStateV2 struct {
-	// Path is the import path from the root module. Module imports are
-	// always disjoint, so the path represents a module tree.
-	Path []string `json:"path"`
-
-	// Locals are kept only transiently in-memory, because we can always
-	// re-compute them.
-	Locals map[string]interface{} `json:"-"`
-
-	// Outputs declared by the module and maintained for each module
-	// even though only the root module technically needs to be kept.
-	// This allows operators to inspect values at the boundaries.
-	Outputs map[string]*outputStateV2 `json:"outputs"`
-
-	// Resources is a mapping of the logically named resource to
-	// the state of the resource. Each resource may actually have
-	// N instances underneath, although a user only needs to think
-	// about the 1:1 case.
-	Resources map[string]*resourceStateV2 `json:"resources"`
-
-	// Dependencies are a list of things that this module relies on
-	// existing to remain intact. For example: a module may depend
-	// on a VPC ID given by an aws_vpc resource.
-	//
-	// Terraform uses this information to build valid destruction
-	// orders and to warn the user if they're destroying a module that
-	// another resource depends on.
-	//
-	// Things can be put into this list that may not be managed by
-	// Terraform.
If Terraform doesn't find a matching ID in the
-	// overall state, then it assumes it isn't managed and doesn't
-	// worry about it.
-	Dependencies []string `json:"depends_on"`
-}
-
-type resourceStateV2 struct {
-	// This is filled in and managed by Terraform, and is the resource
-	// type itself such as "mycloud_instance". If a resource provider sets
-	// this value, it won't be persisted.
-	Type string `json:"type"`
-
-	// Dependencies are a list of things that this resource relies on
-	// existing to remain intact. For example: an AWS instance might
-	// depend on a subnet (which itself might depend on a VPC, and so
-	// on).
-	//
-	// Terraform uses this information to build valid destruction
-	// orders and to warn the user if they're destroying a resource that
-	// another resource depends on.
-	//
-	// Things can be put into this list that may not be managed by
-	// Terraform. If Terraform doesn't find a matching ID in the
-	// overall state, then it assumes it isn't managed and doesn't
-	// worry about it.
-	Dependencies []string `json:"depends_on"`
-
-	// Primary is the current active instance for this resource.
-	// It can be replaced but only after a successful creation.
-	// These are the instances on which providers will act.
-	Primary *instanceStateV2 `json:"primary"`
-
-	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
-	// Primary is Deposed to get it out of the way for the replacement Primary to
-	// be created by Apply. If the replacement Primary creates successfully, the
-	// Deposed instance is cleaned up.
-	//
-	// If there were problems creating the replacement Primary, the Deposed
-	// instance and the (now tainted) replacement Primary will be swapped so the
-	// tainted replacement will be cleaned up instead.
-	//
-	// An instance will remain in the Deposed list until it is successfully
-	// destroyed and purged.
-	Deposed []*instanceStateV2 `json:"deposed"`
-
-	// Provider is used when a resource is connected to a provider with an alias.
-	// If this string is empty, the resource is connected to the default provider,
-	// e.g. "aws_instance" goes with the "aws" provider.
-	// If the resource block contained a "provider" key, that value will be set here.
-	Provider string `json:"provider"`
-
-	mu sync.Mutex
-}
-
-type instanceStateV2 struct {
-	// A unique ID for this resource. This is opaque to Terraform
-	// and is only meant as a lookup mechanism for the providers.
-	ID string `json:"id"`
-
-	// Attributes are basic information about the resource. Any keys here
-	// are accessible in variable format within Terraform configurations:
-	// ${resourcetype.name.attribute}.
-	Attributes map[string]string `json:"attributes"`
-
-	// Meta is a simple K/V map that is persisted to the State but otherwise
-	// ignored by Terraform core. It's meant to be used for accounting by
-	// external client code. The value here must only contain Go primitives
-	// and collections.
-	Meta map[string]interface{} `json:"meta"`
-
-	// Tainted is used to mark a resource for recreation.
- Tainted bool `json:"tainted"` -} - -type backendStateV2 struct { - Type string `json:"type"` // Backend type - ConfigRaw json.RawMessage `json:"config"` // Backend raw config - Hash uint64 `json:"hash"` // Hash of portion of configuration from config files -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go deleted file mode 100644 index 2d03c07c..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go +++ /dev/null @@ -1,145 +0,0 @@ -package statefile - -import ( - "fmt" - "log" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/mitchellh/copystructure" -) - -func upgradeStateV2ToV3(old *stateV2) (*stateV3, error) { - if old == nil { - return (*stateV3)(nil), nil - } - - var new *stateV3 - { - copy, err := copystructure.Config{Lock: true}.Copy(old) - if err != nil { - panic(err) - } - newWrongType := copy.(*stateV2) - newRightType := (stateV3)(*newWrongType) - new = &newRightType - } - - // Set the new version number - new.Version = 3 - - // Change the counts for things which look like maps to use the % - // syntax. Remove counts for empty collections - they will be added - // back in later. - for _, module := range new.Modules { - for _, resource := range module.Resources { - // Upgrade Primary - if resource.Primary != nil { - upgradeAttributesV2ToV3(resource.Primary) - } - - // Upgrade Deposed - for _, deposed := range resource.Deposed { - upgradeAttributesV2ToV3(deposed) - } - } - } - - return new, nil -} - -func upgradeAttributesV2ToV3(instanceState *instanceStateV2) error { - collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) - collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) - - // Identify the key prefix of anything which is a collection - var collectionKeyPrefixes []string - for key := range instanceState.Attributes { - if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) - } - } - sort.Strings(collectionKeyPrefixes) - - log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) - - // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not - // run very often. - for _, prefix := range collectionKeyPrefixes { - // First get the actual keys that belong to this prefix - var potentialKeysMatching []string - for key := range instanceState.Attributes { - if strings.HasPrefix(key, prefix) { - potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) - } - } - sort.Strings(potentialKeysMatching) - - var actualKeysMatching []string - for _, key := range potentialKeysMatching { - if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - actualKeysMatching = append(actualKeysMatching, submatches[0][1]) - } else { - if key != "#" { - actualKeysMatching = append(actualKeysMatching, key) - } - } - } - actualKeysMatching = uniqueSortedStrings(actualKeysMatching) - - // Now inspect the keys in order to determine whether this is most likely to be - // a map, list or set. There is room for error here, so we log in each case. If - // there is no method of telling, we remove the key from the InstanceState in - // order that it will be recreated. Again, this could be rolled into fewer loops - // but we prefer clarity. 
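-		// Worked example (hypothetical attributes): for the prefix "tags.",
-		// flatmap state such as
-		//
-		//	"tags.#"    = "2"
-		//	"tags.env"  = "prod"
-		//	"tags.name" = "web"
-		//
-		// has non-numeric subkeys ("env", "name"), so the code below
-		// classifies it as a map and rewrites the count key "tags.#" to
-		// "tags.%".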
-
-		oldCountKey := fmt.Sprintf("%s#", prefix)
-
-		// First, detect "obvious" maps - which have non-numeric keys (mostly).
-		hasNonNumericKeys := false
-		for _, key := range actualKeysMatching {
-			if _, err := strconv.Atoi(key); err != nil {
-				hasNonNumericKeys = true
-			}
-		}
-		if hasNonNumericKeys {
-			newCountKey := fmt.Sprintf("%s%%", prefix)
-
-			instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey]
-			delete(instanceState.Attributes, oldCountKey)
-			log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s",
-				strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey])
-		}
-
-		// Now detect empty collections and remove them from state.
-		if len(actualKeysMatching) == 0 {
-			delete(instanceState.Attributes, oldCountKey)
-			log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.",
-				strings.TrimSuffix(prefix, "."))
-		}
-	}
-
-	return nil
-}
-
-// uniqueSortedStrings removes duplicates from a slice of strings and returns
-// a sorted slice of the unique strings.
-func uniqueSortedStrings(input []string) []string {
-	uniquemap := make(map[string]struct{})
-	for _, str := range input {
-		uniquemap[str] = struct{}{}
-	}
-
-	output := make([]string, len(uniquemap))
-
-	i := 0
-	for key := range uniquemap {
-		output[i] = key
-		i = i + 1
-	}
-
-	sort.Strings(output)
-	return output
-}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version3.go b/vendor/github.com/hashicorp/terraform/states/statefile/version3.go
deleted file mode 100644
index ab6414b0..00000000
--- a/vendor/github.com/hashicorp/terraform/states/statefile/version3.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package statefile
-
-import (
-	"encoding/json"
-	"fmt"
-
-	"github.com/hashicorp/terraform/tfdiags"
-)
-
-func readStateV3(src []byte) (*File, tfdiags.Diagnostics) {
-	var diags tfdiags.Diagnostics
-	sV3 := &stateV3{}
-	err := json.Unmarshal(src, sV3)
-	if err != nil {
-		diags = diags.Append(jsonUnmarshalDiags(err))
-		return nil, diags
-	}
-
-	file, prepDiags := prepareStateV3(sV3)
-	diags = diags.Append(prepDiags)
-	return file, diags
-}
-
-func prepareStateV3(sV3 *stateV3) (*File, tfdiags.Diagnostics) {
-	var diags tfdiags.Diagnostics
-	sV4, err := upgradeStateV3ToV4(sV3)
-	if err != nil {
-		diags = diags.Append(tfdiags.Sourceless(
-			tfdiags.Error,
-			upgradeFailed,
-			fmt.Sprintf("Error upgrading state file format from version 3 to version 4: %s.", err),
-		))
-		return nil, diags
-	}
-
-	file, prepDiags := prepareStateV4(sV4)
-	diags = diags.Append(prepDiags)
-	return file, diags
-}
-
-// stateV3 is a representation of the legacy JSON state format version 3.
-//
-// It is only used to read version 3 JSON files prior to upgrading them to
-// the current format.
-//
-// The differences between version 2 and version 3 are only in the data and
-// not in the structure, so stateV3 actually shares the same structs as
-// stateV2. Type stateV3 represents that the data within is formatted as
-// expected by the V3 format, rather than the V2 format.
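-//
-// Because stateV3 and stateV2 share an underlying struct type, upgrade code
-// can convert between them directly; upgradeStateV2ToV3 above does exactly
-// that after deep-copying, e.g.:
-//
-//	v3 := (stateV3)(*v2) // v2 is a *stateV2 deep copy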
-type stateV3 stateV2
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go
deleted file mode 100644
index e54a08cc..00000000
--- a/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go
+++ /dev/null
@@ -1,500 +0,0 @@
-package statefile
-
-import (
-	"encoding/json"
-	"fmt"
-	"log"
-	"strconv"
-	"strings"
-
-	"github.com/hashicorp/hcl/v2/hclsyntax"
-	"github.com/zclconf/go-cty/cty"
-	ctyjson "github.com/zclconf/go-cty/cty/json"
-
-	"github.com/hashicorp/terraform/addrs"
-	"github.com/hashicorp/terraform/configs"
-	"github.com/hashicorp/terraform/states"
-	"github.com/hashicorp/terraform/tfdiags"
-)
-
-func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) {
-
-	if old.Serial < 0 {
-		// The new format is using uint64 here, which should be fine for any
-		// real state (we only used positive integers in practice) but we'll
-		// catch this explicitly here to avoid weird behavior if a state file
-		// has been tampered with in some way.
-		return nil, fmt.Errorf("state has serial less than zero, which is invalid")
-	}
-
-	new := &stateV4{
-		TerraformVersion: old.TFVersion,
-		Serial:           uint64(old.Serial),
-		Lineage:          old.Lineage,
-		RootOutputs:      map[string]outputStateV4{},
-		Resources:        []resourceStateV4{},
-	}
-
-	if new.TerraformVersion == "" {
-		// Older formats considered this to be optional, but now it's required
-		// and so we'll stub it out with something that's definitely older
-		// than the version that really created this state.
-		new.TerraformVersion = "0.0.0"
-	}
-
-	for _, msOld := range old.Modules {
-		if len(msOld.Path) < 1 || msOld.Path[0] != "root" {
-			return nil, fmt.Errorf("state contains invalid module path %#v", msOld.Path)
-		}
-
-		// Convert legacy-style module address into our newer address type.
-		// Since these old formats are only generated by versions of Terraform
-		// that don't support count and for_each on modules, we can just assume
-		// all of the modules are unkeyed.
-		moduleAddr := make(addrs.ModuleInstance, len(msOld.Path)-1)
-		for i, name := range msOld.Path[1:] {
-			if !hclsyntax.ValidIdentifier(name) {
-				// If we don't fail here then we'll produce an invalid state
-				// version 4 which subsequent operations will reject, so we'll
-				// fail early here for safety to make sure we can never
-				// inadvertently commit an invalid snapshot to a backend.
-				return nil, fmt.Errorf("state contains invalid module path %#v: %q is not a valid identifier; rename it in Terraform 0.11 before upgrading to Terraform 0.12", msOld.Path, name)
-			}
-			moduleAddr[i] = addrs.ModuleInstanceStep{
-				Name:        name,
-				InstanceKey: addrs.NoKey,
-			}
-		}
-
-		// In a v3 state file, a "resource state" is actually an instance
-		// state, so we need to fill in a missing level of hierarchy here
-		// by lazily creating resource states as we encounter them.
-		// We'll track them in here, keyed on the string representation of
-		// the resource address.
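-		// For example (hypothetical legacy entries), "aws_instance.foo.0"
-		// and "aws_instance.foo.1" both map to the single key
-		// "aws_instance.foo" here, contributing one instance each to the
-		// same resourceStateV4.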
- resourceStates := map[string]*resourceStateV4{} - - for legacyAddr, rsOld := range msOld.Resources { - instAddr, err := parseLegacyResourceAddress(legacyAddr) - if err != nil { - return nil, err - } - - resAddr := instAddr.Resource - rs, exists := resourceStates[resAddr.String()] - if !exists { - var modeStr string - switch resAddr.Mode { - case addrs.ManagedResourceMode: - modeStr = "managed" - case addrs.DataResourceMode: - modeStr = "data" - default: - return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode %#v", resAddr, resAddr.Mode) - } - - // In state versions prior to 4 we allowed each instance of a - // resource to have its own provider configuration address, - // which makes no real sense in practice because providers - // are associated with resources in the configuration. We - // elevate that to the resource level during this upgrade, - // implicitly taking the provider address of the first instance - // we encounter for each resource. While this is lossy in - // theory, in practice there is no reason for these values to - // differ between instances. - var providerAddr addrs.AbsProviderConfig - oldProviderAddr := rsOld.Provider - if strings.Contains(oldProviderAddr, "provider.") { - // Smells like a new-style provider address, but we'll test it. - var diags tfdiags.Diagnostics - providerAddr, diags = addrs.ParseLegacyAbsProviderConfigStr(oldProviderAddr) - if diags.HasErrors() { - if strings.Contains(oldProviderAddr, "${") { - // There seems to be a common misconception that - // interpolation was valid in provider aliases - // in 0.11, so we'll use a specialized error - // message for that case. - return nil, fmt.Errorf("invalid provider config reference %q for %s: this alias seems to contain a template interpolation sequence, which was not supported but also not error-checked in Terraform 0.11. To proceed, rename the associated provider alias to a valid identifier and apply the change with Terraform 0.11 before upgrading to Terraform 0.12", oldProviderAddr, instAddr) - } - return nil, fmt.Errorf("invalid provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err()) - } - } else { - // Smells like an old-style module-local provider address, - // which we'll need to migrate. We'll assume it's referring - // to the same module the resource is in, which might be - // incorrect but it'll get fixed up next time any updates - // are made to an instance. - if oldProviderAddr != "" { - localAddr, diags := configs.ParseProviderConfigCompactStr(oldProviderAddr) - if diags.HasErrors() { - if strings.Contains(oldProviderAddr, "${") { - // There seems to be a common misconception that - // interpolation was valid in provider aliases - // in 0.11, so we'll use a specialized error - // message for that case. - return nil, fmt.Errorf("invalid legacy provider config reference %q for %s: this alias seems to contain a template interpolation sequence, which was not supported but also not error-checked in Terraform 0.11. To proceed, rename the associated provider alias to a valid identifier and apply the change with Terraform 0.11 before upgrading to Terraform 0.12", oldProviderAddr, instAddr) - } - return nil, fmt.Errorf("invalid legacy provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err()) - } - providerAddr = addrs.AbsProviderConfig{ - Module: moduleAddr.Module(), - // We use NewLegacyProvider here so we can use - // LegacyString() below to get the appropriate - // legacy-style provider string. 
- Provider: addrs.NewLegacyProvider(localAddr.LocalName), - Alias: localAddr.Alias, - } - } else { - providerAddr = addrs.AbsProviderConfig{ - Module: moduleAddr.Module(), - // We use NewLegacyProvider here so we can use - // LegacyString() below to get the appropriate - // legacy-style provider string. - Provider: addrs.NewLegacyProvider(resAddr.ImpliedProvider()), - } - } - } - - rs = &resourceStateV4{ - Module: moduleAddr.String(), - Mode: modeStr, - Type: resAddr.Type, - Name: resAddr.Name, - Instances: []instanceObjectStateV4{}, - ProviderConfig: providerAddr.LegacyString(), - } - resourceStates[resAddr.String()] = rs - } - - // Now we'll deal with the instance itself, which may either be - // the first instance in a resource we just created or an additional - // instance for a resource added on a prior loop. - instKey := instAddr.Key - if isOld := rsOld.Primary; isOld != nil { - isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, states.NotDeposed) - if err != nil { - return nil, fmt.Errorf("failed to migrate primary generation of %s: %s", instAddr, err) - } - rs.Instances = append(rs.Instances, *isNew) - } - for i, isOld := range rsOld.Deposed { - // When we migrate old instances we'll use sequential deposed - // keys just so that the upgrade result is deterministic. New - // deposed keys allocated moving forward will be pseudorandomly - // selected, but we check for collisions and so these - // non-random ones won't hurt. - deposedKey := states.DeposedKey(fmt.Sprintf("%08x", i+1)) - isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, deposedKey) - if err != nil { - return nil, fmt.Errorf("failed to migrate deposed generation index %d of %s: %s", i, instAddr, err) - } - rs.Instances = append(rs.Instances, *isNew) - } - - if instKey != addrs.NoKey && rs.EachMode == "" { - rs.EachMode = "list" - } - } - - for _, rs := range resourceStates { - new.Resources = append(new.Resources, *rs) - } - - if len(msOld.Path) == 1 && msOld.Path[0] == "root" { - // We'll migrate the outputs for this module too, then. - for name, oldOS := range msOld.Outputs { - newOS := outputStateV4{ - Sensitive: oldOS.Sensitive, - } - - valRaw := oldOS.Value - valSrc, err := json.Marshal(valRaw) - if err != nil { - // Should never happen, because this value came from JSON - // in the first place and so we're just round-tripping here. - return nil, fmt.Errorf("failed to serialize output %q value as JSON: %s", name, err) - } - - // The "type" field in state V2 wasn't really that useful - // since it was only able to capture string vs. list vs. map. - // For this reason, during upgrade we'll just discard it - // altogether and use cty's idea of the implied type of - // turning our old value into JSON. - ty, err := ctyjson.ImpliedType(valSrc) - if err != nil { - // REALLY should never happen, because we literally just - // encoded this as JSON above! - return nil, fmt.Errorf("failed to parse output %q value from JSON: %s", name, err) - } - - // ImpliedType tends to produce structural types, but since older - // version of Terraform didn't support those a collection type - // is probably what was intended, so we'll see if we can - // interpret our value as one. 
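-				// For example, a stored output value ["a", "b"] implies
-				// cty.Tuple([]cty.Type{cty.String, cty.String}); the helper
-				// below simplifies such homogeneous tuples to a collection
-				// type, here cty.List(cty.String), to match the pre-0.12
-				// type system.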
- ty = simplifyImpliedValueType(ty) - - tySrc, err := ctyjson.MarshalType(ty) - if err != nil { - return nil, fmt.Errorf("failed to serialize output %q type as JSON: %s", name, err) - } - - newOS.ValueRaw = json.RawMessage(valSrc) - newOS.ValueTypeRaw = json.RawMessage(tySrc) - - new.RootOutputs[name] = newOS - } - } - } - - new.normalize() - - return new, nil -} - -func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2, instKey addrs.InstanceKey, deposedKey states.DeposedKey) (*instanceObjectStateV4, error) { - - // Schema versions were, in prior formats, a private concern of the provider - // SDK, and not a first-class concept in the state format. Here we're - // sniffing for the pre-0.12 SDK's way of representing schema versions - // and promoting it to our first-class field if we find it. We'll ignore - // it if it doesn't look like what the SDK would've written. If this - // sniffing fails then we'll assume schema version 0. - var schemaVersion uint64 - migratedSchemaVersion := false - if raw, exists := isOld.Meta["schema_version"]; exists { - switch tv := raw.(type) { - case string: - v, err := strconv.ParseUint(tv, 10, 64) - if err == nil { - schemaVersion = v - migratedSchemaVersion = true - } - case int: - schemaVersion = uint64(tv) - migratedSchemaVersion = true - case float64: - schemaVersion = uint64(tv) - migratedSchemaVersion = true - } - } - - private := map[string]interface{}{} - for k, v := range isOld.Meta { - if k == "schema_version" && migratedSchemaVersion { - // We're gonna promote this into our first-class schema version field - continue - } - private[k] = v - } - var privateJSON []byte - if len(private) != 0 { - var err error - privateJSON, err = json.Marshal(private) - if err != nil { - // This shouldn't happen, because the Meta values all came from JSON - // originally anyway. - return nil, fmt.Errorf("cannot serialize private instance object data: %s", err) - } - } - - var status string - if isOld.Tainted { - status = "tainted" - } - - var instKeyRaw interface{} - switch tk := instKey.(type) { - case addrs.IntKey: - instKeyRaw = int(tk) - case addrs.StringKey: - instKeyRaw = string(tk) - default: - if instKeyRaw != nil { - return nil, fmt.Errorf("unsupported instance key: %#v", instKey) - } - } - - var attributes map[string]string - if isOld.Attributes != nil { - attributes = make(map[string]string, len(isOld.Attributes)) - for k, v := range isOld.Attributes { - attributes[k] = v - } - } - if isOld.ID != "" { - // As a special case, if we don't already have an "id" attribute and - // yet there's a non-empty first-class ID on the old object then we'll - // create a synthetic id attribute to avoid losing that first-class id. - // In practice this generally arises only in tests where state literals - // are hand-written in a non-standard way; real code prior to 0.12 - // would always force the first-class ID to be copied into the - // id attribute before storing. - if attributes == nil { - attributes = make(map[string]string, len(isOld.Attributes)) - } - if idVal := attributes["id"]; idVal == "" { - attributes["id"] = isOld.ID - } - } - - dependencies := make([]string, 0, len(rsOld.Dependencies)) - for _, v := range rsOld.Dependencies { - depStr, err := parseLegacyDependency(v) - if err != nil { - // We just drop invalid dependencies on the floor here, because - // they tend to get left behind in Terraform 0.11 when resources - // are renamed or moved between modules and there's no automatic - // way to fix them here. 
In practice it shouldn't hurt to miss
- // a few dependency edges in the state because a subsequent plan
- // will run a refresh walk first and re-synchronize the
- // dependencies with the configuration.
- //
- // There is one rough edge where this can cause an incorrect
- // result, though: If the first command the user runs after
- // upgrading to Terraform 0.12 uses -refresh=false and thus
- // prevents the dependency reorganization from occurring _and_
- // that initial plan discovered "orphaned" resources (not present
- // in configuration any longer) then when the plan is applied the
- // destroy ordering will be incorrect for the instances of those
- // resources. We expect that is a rare enough situation that it
- // isn't a big deal, and even when it _does_ occur it's common for
- // the apply to succeed anyway unless many separate resources with
- // complex inter-dependencies are all orphaned at once.
- log.Printf("statefile: ignoring invalid dependency address %q while upgrading from state version 3 to version 4: %s", v, err)
- continue
- }
- dependencies = append(dependencies, depStr)
- }
-
- return &instanceObjectStateV4{
- IndexKey: instKeyRaw,
- Status: status,
- Deposed: string(deposedKey),
- AttributesFlat: attributes,
- SchemaVersion: schemaVersion,
- PrivateRaw: privateJSON,
- }, nil
-}
-
-// parseLegacyResourceAddress parses the different identifier format used
-// by state formats before version 4, like "instance.name.0".
-func parseLegacyResourceAddress(s string) (addrs.ResourceInstance, error) {
- var ret addrs.ResourceInstance
-
- // Split based on ".". Every resource address should have at least two
- // elements (type and name).
- parts := strings.Split(s, ".")
- if len(parts) < 2 || len(parts) > 4 {
- return ret, fmt.Errorf("invalid internal resource address format: %s", s)
- }
-
- // Data resource if we have at least 3 parts and the first one is data
- ret.Resource.Mode = addrs.ManagedResourceMode
- if len(parts) > 2 && parts[0] == "data" {
- ret.Resource.Mode = addrs.DataResourceMode
- parts = parts[1:]
- }
-
- // If we're not a data resource and we have more than 3, then it is an error
- if len(parts) > 3 && ret.Resource.Mode != addrs.DataResourceMode {
- return ret, fmt.Errorf("invalid internal resource address format: %s", s)
- }
-
- // Build the parts of the resource address that are guaranteed to exist
- ret.Resource.Type = parts[0]
- ret.Resource.Name = parts[1]
- ret.Key = addrs.NoKey
-
- // If we have more parts, then we have an index. Parse that.
- if len(parts) > 2 {
- idx, err := strconv.ParseInt(parts[2], 0, 0)
- if err != nil {
- return ret, fmt.Errorf("error parsing resource address %q: %s", s, err)
- }
-
- ret.Key = addrs.IntKey(idx)
- }
-
- return ret, nil
-}
-
-// simplifyImpliedValueType attempts to heuristically simplify a value type
-// derived from a legacy stored output value into something simpler that
-// is closer to what would've fitted into the pre-v0.12 value type system.
-func simplifyImpliedValueType(ty cty.Type) cty.Type {
- switch {
- case ty.IsTupleType():
- // If all of the element types are the same then we'll make this
- // a list instead. This is very likely to be true, since prior versions
- // of Terraform did not officially support mixed-type collections.
-
- if ty.Equals(cty.EmptyTuple) {
- // Don't know what the element type would be, then.
- return ty - } - - etys := ty.TupleElementTypes() - ety := etys[0] - for _, other := range etys[1:] { - if !other.Equals(ety) { - // inconsistent types - return ty - } - } - ety = simplifyImpliedValueType(ety) - return cty.List(ety) - - case ty.IsObjectType(): - // If all of the attribute types are the same then we'll make this - // a map instead. This is very likely to be true, since prior versions - // of Terraform did not officially support mixed-type collections. - - if ty.Equals(cty.EmptyObject) { - // Don't know what the element type would be, then. - return ty - } - - atys := ty.AttributeTypes() - var ety cty.Type - for _, other := range atys { - if ety == cty.NilType { - ety = other - continue - } - if !other.Equals(ety) { - // inconsistent types - return ty - } - } - ety = simplifyImpliedValueType(ety) - return cty.Map(ety) - - default: - // No other normalizations are possible - return ty - } -} - -func parseLegacyDependency(s string) (string, error) { - parts := strings.Split(s, ".") - ret := parts[0] - for _, part := range parts[1:] { - if part == "*" { - break - } - if i, err := strconv.Atoi(part); err == nil { - ret = ret + fmt.Sprintf("[%d]", i) - break - } - ret = ret + "." + part - } - - // The result must parse as a reference, or else we'll create an invalid - // state file. - var diags tfdiags.Diagnostics - _, diags = addrs.ParseRefStr(ret) - if diags.HasErrors() { - return "", diags.Err() - } - - return ret, nil -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version4.go b/vendor/github.com/hashicorp/terraform/states/statefile/version4.go deleted file mode 100644 index 04701f49..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/version4.go +++ /dev/null @@ -1,576 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - "io" - "sort" - - version "github.com/hashicorp/go-version" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -func readStateV4(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV4 := &stateV4{} - err := json.Unmarshal(src, sV4) - if err != nil { - diags = diags.Append(jsonUnmarshalDiags(err)) - return nil, diags - } - - file, prepDiags := prepareStateV4(sV4) - diags = diags.Append(prepDiags) - return file, diags -} - -func prepareStateV4(sV4 *stateV4) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - var tfVersion *version.Version - if sV4.TerraformVersion != "" { - var err error - tfVersion, err = version.NewVersion(sV4.TerraformVersion) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid Terraform version string", - fmt.Sprintf("State file claims to have been written by Terraform version %q, which is not a valid version string.", sV4.TerraformVersion), - )) - } - } - - file := &File{ - TerraformVersion: tfVersion, - Serial: sV4.Serial, - Lineage: sV4.Lineage, - } - - state := states.NewState() - - for _, rsV4 := range sV4.Resources { - rAddr := addrs.Resource{ - Type: rsV4.Type, - Name: rsV4.Name, - } - switch rsV4.Mode { - case "managed": - rAddr.Mode = addrs.ManagedResourceMode - case "data": - rAddr.Mode = addrs.DataResourceMode - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource mode in state", - fmt.Sprintf("State contains a resource with mode %q (%q %q) which is not supported.", rsV4.Mode, rAddr.Type, rAddr.Name), - )) - continue - } - - 
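An aside on the parseLegacyDependency helper from the hunk above: the string rewrite it performs is easy to see in isolation. The following is a minimal, self-contained sketch of the same transformation (it is not the vendored implementation, and it omits the final addrs.ParseRefStr validation step):

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    )

    // normalizeLegacyDep mirrors the rewrite in parseLegacyDependency: a
    // numeric part becomes an index, and a "*" part ends the address.
    func normalizeLegacyDep(s string) string {
    	parts := strings.Split(s, ".")
    	ret := parts[0]
    	for _, part := range parts[1:] {
    		if part == "*" {
    			break
    		}
    		if i, err := strconv.Atoi(part); err == nil {
    			ret += fmt.Sprintf("[%d]", i)
    			break
    		}
    		ret += "." + part
    	}
    	return ret
    }

    func main() {
    	fmt.Println(normalizeLegacyDep("aws_instance.foo.0")) // aws_instance.foo[0]
    	fmt.Println(normalizeLegacyDep("aws_instance.foo.*")) // aws_instance.foo
    }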
moduleAddr := addrs.RootModuleInstance
- if rsV4.Module != "" {
- var addrDiags tfdiags.Diagnostics
- moduleAddr, addrDiags = addrs.ParseModuleInstanceStr(rsV4.Module)
- diags = diags.Append(addrDiags)
- if addrDiags.HasErrors() {
- continue
- }
- }
-
- providerAddr, addrDiags := addrs.ParseAbsProviderConfigStr(rsV4.ProviderConfig)
- diags.Append(addrDiags)
- if addrDiags.HasErrors() {
- // If ParseAbsProviderConfigStr returns an error, the state may have
- // been written before Provider FQNs were introduced and the
- // AbsProviderConfig string format will need normalization. If so,
- // we assume it is a default (hashicorp) provider.
- var legacyAddrDiags tfdiags.Diagnostics
- providerAddr, legacyAddrDiags = addrs.ParseLegacyAbsProviderConfigStr(rsV4.ProviderConfig)
- if legacyAddrDiags.HasErrors() {
- continue
- }
- }
-
- ms := state.EnsureModule(moduleAddr)
-
- // Ensure the resource container object is present in the state.
- ms.SetResourceProvider(rAddr, providerAddr)
-
- for _, isV4 := range rsV4.Instances {
- keyRaw := isV4.IndexKey
- var key addrs.InstanceKey
- switch tk := keyRaw.(type) {
- case int:
- key = addrs.IntKey(tk)
- case float64:
- // Since JSON only has one number type, reading from encoding/json
- // gives us a float64 here even if the number is whole.
- // float64 has a smaller integer range than int, but in practice
- // we rarely have more than a few tens of instances and so
- // it's unlikely that we'll exhaust the 52 bits in a float64.
- key = addrs.IntKey(int(tk))
- case string:
- key = addrs.StringKey(tk)
- default:
- if keyRaw != nil {
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Invalid resource instance metadata in state",
- fmt.Sprintf("Resource %s has an instance with the invalid instance key %#v.", rAddr.Absolute(moduleAddr), keyRaw),
- ))
- continue
- }
- key = addrs.NoKey
- }
-
- instAddr := rAddr.Instance(key)
-
- obj := &states.ResourceInstanceObjectSrc{
- SchemaVersion: isV4.SchemaVersion,
- CreateBeforeDestroy: isV4.CreateBeforeDestroy,
- }
-
- {
- // Instance attributes
- switch {
- case isV4.AttributesRaw != nil:
- obj.AttrsJSON = isV4.AttributesRaw
- case isV4.AttributesFlat != nil:
- obj.AttrsFlat = isV4.AttributesFlat
- default:
- // This is odd, but we'll accept it and just treat the
- // object as being empty. In practice this should arise
- // only from the contrived sort of state objects we tend
- // to hand-write inline in tests.
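A brief aside before the attribute handling continues: the float64 case in the instance key switch above exists because of an encoding/json detail worth making concrete. When the decode target is interface{}, every JSON number arrives as float64, even a whole number. A minimal sketch (illustrative only):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	var m map[string]interface{}
    	if err := json.Unmarshal([]byte(`{"index_key": 0}`), &m); err != nil {
    		panic(err)
    	}
    	// Prints "float64": the whole number 0 still decodes as a float.
    	fmt.Printf("%T\n", m["index_key"])
    }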
- obj.AttrsJSON = []byte{'{', '}'} - } - } - - { - // Status - raw := isV4.Status - switch raw { - case "": - obj.Status = states.ObjectReady - case "tainted": - obj.Status = states.ObjectTainted - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource instance metadata in state", - fmt.Sprintf("Instance %s has invalid status %q.", instAddr.Absolute(moduleAddr), raw), - )) - continue - } - } - - if raw := isV4.PrivateRaw; len(raw) > 0 { - obj.Private = raw - } - - { - depsRaw := isV4.Dependencies - deps := make([]addrs.ConfigResource, 0, len(depsRaw)) - for _, depRaw := range depsRaw { - addr, addrDiags := addrs.ParseAbsResourceStr(depRaw) - diags = diags.Append(addrDiags) - if addrDiags.HasErrors() { - continue - } - deps = append(deps, addr.Config()) - } - obj.Dependencies = deps - } - - switch { - case isV4.Deposed != "": - dk := states.DeposedKey(isV4.Deposed) - if len(dk) != 8 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource instance metadata in state", - fmt.Sprintf("Instance %s has an object with deposed key %q, which is not correctly formatted.", instAddr.Absolute(moduleAddr), isV4.Deposed), - )) - continue - } - is := ms.ResourceInstance(instAddr) - if is.HasDeposed(dk) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Duplicate resource instance in state", - fmt.Sprintf("Instance %s deposed object %q appears multiple times in the state file.", instAddr.Absolute(moduleAddr), dk), - )) - continue - } - - ms.SetResourceInstanceDeposed(instAddr, dk, obj, providerAddr) - default: - is := ms.ResourceInstance(instAddr) - if is.HasCurrent() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Duplicate resource instance in state", - fmt.Sprintf("Instance %s appears multiple times in the state file.", instAddr.Absolute(moduleAddr)), - )) - continue - } - - ms.SetResourceInstanceCurrent(instAddr, obj, providerAddr) - } - } - - // We repeat this after creating the instances because - // SetResourceInstanceCurrent automatically resets this metadata based - // on the incoming objects. That behavior is useful when we're making - // piecemeal updates to the state during an apply, but when we're - // reading the state file we want to reflect its contents exactly. - ms.SetResourceProvider(rAddr, providerAddr) - } - - // The root module is special in that we persist its attributes and thus - // need to reload them now. (For descendent modules we just re-calculate - // them based on the latest configuration on each run.) 
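The root-output handling that follows decodes each stored output from a pair of JSON documents: one for the value and one for its cty type. A self-contained sketch of that round-trip using the same go-cty API; the JSON literals here are an assumed example rather than content from a real state file:

    package main

    import (
    	"fmt"

    	"github.com/zclconf/go-cty/cty"
    	ctyjson "github.com/zclconf/go-cty/cty/json"
    )

    func main() {
    	// An output as it might appear in a v4 state file.
    	valueRaw := []byte(`["a","b"]`)
    	typeRaw := []byte(`["list","string"]`)

    	ty, err := ctyjson.UnmarshalType(typeRaw)
    	if err != nil {
    		panic(err)
    	}
    	val, err := ctyjson.Unmarshal(valueRaw, ty)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(val.Type().Equals(cty.List(cty.String))) // true
    	fmt.Println(val.LengthInt())                         // 2
    }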
- {
- rootModule := state.RootModule()
- for name, fos := range sV4.RootOutputs {
- os := &states.OutputValue{
- Addr: addrs.AbsOutputValue{
- OutputValue: addrs.OutputValue{
- Name: name,
- },
- },
- }
- os.Sensitive = fos.Sensitive
-
- ty, err := ctyjson.UnmarshalType([]byte(fos.ValueTypeRaw))
- if err != nil {
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Invalid output value type in state",
- fmt.Sprintf("The state file has an invalid type specification for output %q: %s.", name, err),
- ))
- continue
- }
-
- val, err := ctyjson.Unmarshal([]byte(fos.ValueRaw), ty)
- if err != nil {
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Invalid output value saved in state",
- fmt.Sprintf("The state file has an invalid value for output %q: %s.", name, err),
- ))
- continue
- }
-
- os.Value = val
- rootModule.OutputValues[name] = os
- }
- }
-
- file.State = state
- return file, diags
-}
-
-func writeStateV4(file *File, w io.Writer) tfdiags.Diagnostics {
- // Here we'll convert back from the "File" representation to our
- // stateV4 struct representation and write that.
- //
- // While we support legacy state formats for reading, we only support the
- // latest for writing and so if a V5 is added in future then this function
- // should be deleted and replaced with a writeStateV5, even though the
- // read/prepare V4 functions above would stick around.
-
- var diags tfdiags.Diagnostics
- if file == nil || file.State == nil {
- panic("attempt to write nil state to file")
- }
-
- var terraformVersion string
- if file.TerraformVersion != nil {
- terraformVersion = file.TerraformVersion.String()
- }
-
- sV4 := &stateV4{
- TerraformVersion: terraformVersion,
- Serial: file.Serial,
- Lineage: file.Lineage,
- RootOutputs: map[string]outputStateV4{},
- Resources: []resourceStateV4{},
- }
-
- for name, os := range file.State.RootModule().OutputValues {
- src, err := ctyjson.Marshal(os.Value, os.Value.Type())
- if err != nil {
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Failed to serialize output value in state",
- fmt.Sprintf("An error occurred while serializing output value %q: %s.", name, err),
- ))
- continue
- }
-
- typeSrc, err := ctyjson.MarshalType(os.Value.Type())
- if err != nil {
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Failed to serialize output value in state",
- fmt.Sprintf("An error occurred while serializing the type of output value %q: %s.", name, err),
- ))
- continue
- }
-
- sV4.RootOutputs[name] = outputStateV4{
- Sensitive: os.Sensitive,
- ValueRaw: json.RawMessage(src),
- ValueTypeRaw: json.RawMessage(typeSrc),
- }
- }
-
- for _, ms := range file.State.Modules {
- moduleAddr := ms.Addr
- for _, rs := range ms.Resources {
- resourceAddr := rs.Addr.Resource
-
- var mode string
- switch resourceAddr.Mode {
- case addrs.ManagedResourceMode:
- mode = "managed"
- case addrs.DataResourceMode:
- mode = "data"
- default:
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Failed to serialize resource in state",
- fmt.Sprintf("Resource %s has mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), resourceAddr.Mode),
- ))
- continue
- }
-
- sV4.Resources = append(sV4.Resources, resourceStateV4{
- Module: moduleAddr.String(),
- Mode: mode,
- Type: resourceAddr.Type,
- Name: resourceAddr.Name,
- ProviderConfig: rs.ProviderConfig.String(),
- Instances: []instanceObjectStateV4{},
- })
- rsV4 := &(sV4.Resources[len(sV4.Resources)-1])
-
- for key, is := range rs.Instances {
- if is.HasCurrent()
{
- var objDiags tfdiags.Diagnostics
- rsV4.Instances, objDiags = appendInstanceObjectStateV4(
- rs, is, key, is.Current, states.NotDeposed,
- rsV4.Instances,
- )
- diags = diags.Append(objDiags)
- }
- for dk, obj := range is.Deposed {
- var objDiags tfdiags.Diagnostics
- rsV4.Instances, objDiags = appendInstanceObjectStateV4(
- rs, is, key, obj, dk,
- rsV4.Instances,
- )
- diags = diags.Append(objDiags)
- }
- }
- }
- }
-
- sV4.normalize()
-
- src, err := json.MarshalIndent(sV4, "", " ")
- if err != nil {
- // Shouldn't happen if we do our conversion to *stateV4 correctly above.
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Failed to serialize state",
- fmt.Sprintf("An error occurred while serializing the state to save it. This is a bug in Terraform and should be reported: %s.", err),
- ))
- return diags
- }
- src = append(src, '\n')
-
- _, err = w.Write(src)
- if err != nil {
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Failed to write state",
- fmt.Sprintf("An error occurred while writing the serialized state: %s.", err),
- ))
- return diags
- }
-
- return diags
-}
-
-func appendInstanceObjectStateV4(rs *states.Resource, is *states.ResourceInstance, key addrs.InstanceKey, obj *states.ResourceInstanceObjectSrc, deposed states.DeposedKey, isV4s []instanceObjectStateV4) ([]instanceObjectStateV4, tfdiags.Diagnostics) {
- var diags tfdiags.Diagnostics
-
- var status string
- switch obj.Status {
- case states.ObjectReady:
- status = ""
- case states.ObjectTainted:
- status = "tainted"
- default:
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Failed to serialize resource instance in state",
- fmt.Sprintf("Instance %s has status %s, which cannot be saved in state.", rs.Addr.Instance(key), obj.Status),
- ))
- }
-
- var privateRaw []byte
- if len(obj.Private) > 0 {
- privateRaw = obj.Private
- }
-
- deps := make([]string, len(obj.Dependencies))
- for i, depAddr := range obj.Dependencies {
- deps[i] = depAddr.String()
- }
-
- var rawKey interface{}
- switch tk := key.(type) {
- case addrs.IntKey:
- rawKey = int(tk)
- case addrs.StringKey:
- rawKey = string(tk)
- default:
- if key != addrs.NoKey {
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Failed to serialize resource instance in state",
- fmt.Sprintf("Instance %s has an unsupported instance key: %#v.", rs.Addr.Instance(key), key),
- ))
- }
- }
-
- return append(isV4s, instanceObjectStateV4{
- IndexKey: rawKey,
- Deposed: string(deposed),
- Status: status,
- SchemaVersion: obj.SchemaVersion,
- AttributesFlat: obj.AttrsFlat,
- AttributesRaw: obj.AttrsJSON,
- PrivateRaw: privateRaw,
- Dependencies: deps,
- CreateBeforeDestroy: obj.CreateBeforeDestroy,
- }), diags
-}
-
-type stateV4 struct {
- Version stateVersionV4 `json:"version"`
- TerraformVersion string `json:"terraform_version"`
- Serial uint64 `json:"serial"`
- Lineage string `json:"lineage"`
- RootOutputs map[string]outputStateV4 `json:"outputs"`
- Resources []resourceStateV4 `json:"resources"`
-}
-
-// normalize makes some in-place changes to normalize the way items are
-// stored to ensure that two functionally-equivalent states will be stored
-// identically.
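Before the method itself, a quick illustration of the idea: sorting by a stable composite key is what makes two equivalent states serialize byte-for-byte identically. A toy sketch of the same (mode, type, name) ordering used by sortResourcesV4 further below:

    package main

    import (
    	"fmt"
    	"sort"
    )

    type res struct{ Mode, Type, Name string }

    type byAddr []res

    func (r byAddr) Len() int      { return len(r) }
    func (r byAddr) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
    func (r byAddr) Less(i, j int) bool {
    	switch {
    	case r[i].Mode != r[j].Mode:
    		return r[i].Mode < r[j].Mode
    	case r[i].Type != r[j].Type:
    		return r[i].Type < r[j].Type
    	default:
    		return r[i].Name < r[j].Name
    	}
    }

    func main() {
    	rs := []res{
    		{"managed", "aws_instance", "b"},
    		{"data", "aws_ami", "a"},
    		{"managed", "aws_instance", "a"},
    	}
    	sort.Stable(byAddr(rs))
    	// "data" sorts before "managed"; ties break on type, then name.
    	fmt.Println(rs)
    }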
-func (s *stateV4) normalize() { - sort.Stable(sortResourcesV4(s.Resources)) - for _, rs := range s.Resources { - sort.Stable(sortInstancesV4(rs.Instances)) - } -} - -type outputStateV4 struct { - ValueRaw json.RawMessage `json:"value"` - ValueTypeRaw json.RawMessage `json:"type"` - Sensitive bool `json:"sensitive,omitempty"` -} - -type resourceStateV4 struct { - Module string `json:"module,omitempty"` - Mode string `json:"mode"` - Type string `json:"type"` - Name string `json:"name"` - EachMode string `json:"each,omitempty"` - ProviderConfig string `json:"provider"` - Instances []instanceObjectStateV4 `json:"instances"` -} - -type instanceObjectStateV4 struct { - IndexKey interface{} `json:"index_key,omitempty"` - Status string `json:"status,omitempty"` - Deposed string `json:"deposed,omitempty"` - - SchemaVersion uint64 `json:"schema_version"` - AttributesRaw json.RawMessage `json:"attributes,omitempty"` - AttributesFlat map[string]string `json:"attributes_flat,omitempty"` - - PrivateRaw []byte `json:"private,omitempty"` - - Dependencies []string `json:"dependencies,omitempty"` - - CreateBeforeDestroy bool `json:"create_before_destroy,omitempty"` -} - -// stateVersionV4 is a weird special type we use to produce our hard-coded -// "version": 4 in the JSON serialization. -type stateVersionV4 struct{} - -func (sv stateVersionV4) MarshalJSON() ([]byte, error) { - return []byte{'4'}, nil -} - -func (sv stateVersionV4) UnmarshalJSON([]byte) error { - // Nothing to do: we already know we're version 4 - return nil -} - -type sortResourcesV4 []resourceStateV4 - -func (sr sortResourcesV4) Len() int { return len(sr) } -func (sr sortResourcesV4) Swap(i, j int) { sr[i], sr[j] = sr[j], sr[i] } -func (sr sortResourcesV4) Less(i, j int) bool { - switch { - case sr[i].Mode != sr[j].Mode: - return sr[i].Mode < sr[j].Mode - case sr[i].Type != sr[j].Type: - return sr[i].Type < sr[j].Type - case sr[i].Name != sr[j].Name: - return sr[i].Name < sr[j].Name - default: - return false - } -} - -type sortInstancesV4 []instanceObjectStateV4 - -func (si sortInstancesV4) Len() int { return len(si) } -func (si sortInstancesV4) Swap(i, j int) { si[i], si[j] = si[j], si[i] } -func (si sortInstancesV4) Less(i, j int) bool { - ki := si[i].IndexKey - kj := si[j].IndexKey - if ki != kj { - if (ki == nil) != (kj == nil) { - return ki == nil - } - if kii, isInt := ki.(int); isInt { - if kji, isInt := kj.(int); isInt { - return kii < kji - } - return true - } - if kis, isStr := ki.(string); isStr { - if kjs, isStr := kj.(string); isStr { - return kis < kjs - } - return true - } - } - if si[i].Deposed != si[j].Deposed { - return si[i].Deposed < si[j].Deposed - } - return false -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/write.go b/vendor/github.com/hashicorp/terraform/states/statefile/write.go deleted file mode 100644 index 548ba8a8..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/write.go +++ /dev/null @@ -1,17 +0,0 @@ -package statefile - -import ( - "io" - - tfversion "github.com/hashicorp/terraform/version" -) - -// Write writes the given state to the given writer in the current state -// serialization format. -func Write(s *File, w io.Writer) error { - // Always record the current terraform version in the state. 
- s.TerraformVersion = tfversion.SemVer - - diags := writeStateV4(s, w) - return diags.Err() -} diff --git a/vendor/github.com/hashicorp/terraform/states/sync.go b/vendor/github.com/hashicorp/terraform/states/sync.go deleted file mode 100644 index d0923b15..00000000 --- a/vendor/github.com/hashicorp/terraform/states/sync.go +++ /dev/null @@ -1,557 +0,0 @@ -package states - -import ( - "log" - "sync" - - "github.com/hashicorp/terraform/addrs" - "github.com/zclconf/go-cty/cty" -) - -// SyncState is a wrapper around State that provides concurrency-safe access to -// various common operations that occur during a Terraform graph walk, or other -// similar concurrent contexts. -// -// When a SyncState wrapper is in use, no concurrent direct access to the -// underlying objects is permitted unless the caller first acquires an explicit -// lock, using the Lock and Unlock methods. Most callers should _not_ -// explicitly lock, and should instead use the other methods of this type that -// handle locking automatically. -// -// Since SyncState is able to safely consolidate multiple updates into a single -// atomic operation, many of its methods are at a higher level than those -// of the underlying types, and operate on the state as a whole rather than -// on individual sub-structures of the state. -// -// SyncState can only protect against races within its own methods. It cannot -// provide any guarantees about the order in which concurrent operations will -// be processed, so callers may still need to employ higher-level techniques -// for ensuring correct operation sequencing, such as building and walking -// a dependency graph. -type SyncState struct { - state *State - lock sync.RWMutex -} - -// Module returns a snapshot of the state of the module instance with the given -// address, or nil if no such module is tracked. -// -// The return value is a pointer to a copy of the module state, which the -// caller may then freely access and mutate. However, since the module state -// tends to be a large data structure with many child objects, where possible -// callers should prefer to use a more granular accessor to access a child -// module directly, and thus reduce the amount of copying required. -func (s *SyncState) Module(addr addrs.ModuleInstance) *Module { - s.lock.RLock() - ret := s.state.Module(addr).DeepCopy() - s.lock.RUnlock() - return ret -} - -// ModuleOutputs returns the set of OutputValues that matches the given path. -func (s *SyncState) ModuleOutputs(parentAddr addrs.ModuleInstance, module addrs.ModuleCall) []*OutputValue { - s.lock.RLock() - defer s.lock.RUnlock() - var os []*OutputValue - - for _, o := range s.state.ModuleOutputs(parentAddr, module) { - os = append(os, o.DeepCopy()) - } - return os -} - -// RemoveModule removes the entire state for the given module, taking with -// it any resources associated with the module. This should generally be -// called only for modules whose resources have all been destroyed, but -// that is not enforced by this method. -func (s *SyncState) RemoveModule(addr addrs.ModuleInstance) { - s.lock.Lock() - defer s.lock.Unlock() - - s.state.RemoveModule(addr) -} - -// OutputValue returns a snapshot of the state of the output value with the -// given address, or nil if no such output value is tracked. -// -// The return value is a pointer to a copy of the output value state, which the -// caller may then freely access and mutate. 
-func (s *SyncState) OutputValue(addr addrs.AbsOutputValue) *OutputValue {
- s.lock.RLock()
- ret := s.state.OutputValue(addr).DeepCopy()
- s.lock.RUnlock()
- return ret
-}
-
-// SetOutputValue writes a given output value into the state, overwriting
-// any existing value of the same name.
-//
-// If the module containing the output is not yet tracked in state then it
-// will be added as a side-effect.
-func (s *SyncState) SetOutputValue(addr addrs.AbsOutputValue, value cty.Value, sensitive bool) {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- ms := s.state.EnsureModule(addr.Module)
- ms.SetOutputValue(addr.OutputValue.Name, value, sensitive)
-}
-
-// RemoveOutputValue removes the stored value for the output value with the
-// given address.
-//
-// If this results in its containing module being empty, the module will be
-// pruned from the state as a side-effect.
-func (s *SyncState) RemoveOutputValue(addr addrs.AbsOutputValue) {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- ms := s.state.Module(addr.Module)
- if ms == nil {
- return
- }
- ms.RemoveOutputValue(addr.OutputValue.Name)
- s.maybePruneModule(addr.Module)
-}
-
-// LocalValue returns the current value associated with the given local value
-// address.
-func (s *SyncState) LocalValue(addr addrs.AbsLocalValue) cty.Value {
- s.lock.RLock()
- // cty.Value is immutable, so we don't need any extra copying here.
- ret := s.state.LocalValue(addr)
- s.lock.RUnlock()
- return ret
-}
-
-// SetLocalValue writes a given local value into the state, overwriting
-// any existing value of the same name.
-//
-// If the module containing the local value is not yet tracked in state then it
-// will be added as a side-effect.
-func (s *SyncState) SetLocalValue(addr addrs.AbsLocalValue, value cty.Value) {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- ms := s.state.EnsureModule(addr.Module)
- ms.SetLocalValue(addr.LocalValue.Name, value)
-}
-
-// RemoveLocalValue removes the stored value for the local value with the
-// given address.
-//
-// If this results in its containing module being empty, the module will be
-// pruned from the state as a side-effect.
-func (s *SyncState) RemoveLocalValue(addr addrs.AbsLocalValue) {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- ms := s.state.Module(addr.Module)
- if ms == nil {
- return
- }
- ms.RemoveLocalValue(addr.LocalValue.Name)
- s.maybePruneModule(addr.Module)
-}
-
-// Resource returns a snapshot of the state of the resource with the given
-// address, or nil if no such resource is tracked.
-//
-// The return value is a pointer to a copy of the resource state, which the
-// caller may then freely access and mutate.
-func (s *SyncState) Resource(addr addrs.AbsResource) *Resource {
- s.lock.RLock()
- ret := s.state.Resource(addr).DeepCopy()
- s.lock.RUnlock()
- return ret
-}
-
-// ResourceInstance returns a snapshot of the state of the resource instance with
-// the given address, or nil if no such instance is tracked.
-//
-// The return value is a pointer to a copy of the instance state, which the
-// caller may then freely access and mutate.
-func (s *SyncState) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance {
- s.lock.RLock()
- ret := s.state.ResourceInstance(addr).DeepCopy()
- s.lock.RUnlock()
- return ret
-}
-
-// ResourceInstanceObject returns a snapshot of the current instance object
-// of the given generation belonging to the instance with the given address,
-// or nil if no such object is tracked.
-//
-// The return value is a pointer to a copy of the object, which the caller may
-// then freely access and mutate.
-func (s *SyncState) ResourceInstanceObject(addr addrs.AbsResourceInstance, gen Generation) *ResourceInstanceObjectSrc {
- s.lock.RLock()
- defer s.lock.RUnlock()
-
- inst := s.state.ResourceInstance(addr)
- if inst == nil {
- return nil
- }
- return inst.GetGeneration(gen).DeepCopy()
-}
-
-// SetResourceProvider updates the resource-level provider metadata for the
-// resource at the given address, creating the containing module state and
-// resource state as a side-effect if not already present.
-func (s *SyncState) SetResourceProvider(addr addrs.AbsResource, provider addrs.AbsProviderConfig) {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- ms := s.state.EnsureModule(addr.Module)
- ms.SetResourceProvider(addr.Resource, provider)
-}
-
-// RemoveResource removes the entire state for the given resource, taking with
-// it any instances associated with the resource. This should generally be
-// called only for resource objects whose instances have all been destroyed,
-// but that is not enforced by this method. (Use RemoveResourceIfEmpty instead
-// to safely check first.)
-func (s *SyncState) RemoveResource(addr addrs.AbsResource) {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- ms := s.state.EnsureModule(addr.Module)
- ms.RemoveResource(addr.Resource)
- s.maybePruneModule(addr.Module)
-}
-
-// RemoveResourceIfEmpty is similar to RemoveResource but first checks to
-// make sure there are no instances or objects left in the resource.
-//
-// Returns true if the resource was removed, or false if remaining child
-// objects prevented its removal. Returns true also if the resource was
-// already absent, and thus no action needed to be taken.
-func (s *SyncState) RemoveResourceIfEmpty(addr addrs.AbsResource) bool {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- ms := s.state.Module(addr.Module)
- if ms == nil {
- return true // nothing to do
- }
- rs := ms.Resource(addr.Resource)
- if rs == nil {
- return true // nothing to do
- }
- if len(rs.Instances) != 0 {
- // We don't check here for the possibility of instances that exist
- // but don't have any objects because it's the responsibility of the
- // instance-mutation methods to prune those away automatically.
- return false
- }
- ms.RemoveResource(addr.Resource)
- s.maybePruneModule(addr.Module)
- return true
-}
-
-// MaybeFixUpResourceInstanceAddressForCount deals with the situation where a
-// resource has changed from having "count" set to not set, or vice-versa, and
-// so we need to rename the zeroth instance key to no key at all, or vice-versa.
-//
-// Set countEnabled to true if the resource has count set in its new
-// configuration, or false if it does not.
-//
-// The state is modified in-place if necessary, moving a resource instance
-// between the two addresses. The return value is true if a change was made,
-// and false otherwise.
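To make the rename concrete before the implementation below, here is a toy sketch of the hunt-key/replace-key swap, using plain strings in place of addrs.InstanceKey; NoKey is modeled as the empty string, and all names here are illustrative:

    package main

    import "fmt"

    // fixUpForCount renames the instance stored under the "hunt" key to the
    // "replace" key, unless an object already exists under the replacement.
    func fixUpForCount(instances map[string]string, countEnabled bool) bool {
    	huntKey, replaceKey := "", "0"
    	if !countEnabled {
    		huntKey, replaceKey = replaceKey, huntKey
    	}
    	is, exists := instances[huntKey]
    	if !exists {
    		return false
    	}
    	if _, exists := instances[replaceKey]; exists {
    		return false // both present; keep both untouched
    	}
    	instances[replaceKey] = is
    	delete(instances, huntKey)
    	return true
    }

    func main() {
    	inst := map[string]string{"": "obj"}
    	fmt.Println(fixUpForCount(inst, true)) // true: "" renamed to "0"
    	fmt.Println(inst)                      // map[0:obj]
    }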
-func (s *SyncState) MaybeFixUpResourceInstanceAddressForCount(addr addrs.ConfigResource, countEnabled bool) bool {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- // get all module instances that may match this state
- modules := s.state.ModuleInstances(addr.Module)
- if len(modules) == 0 {
- return false
- }
-
- changed := false
-
- for _, ms := range modules {
- relAddr := addr.Resource
- rs := ms.Resource(relAddr)
- if rs == nil {
- continue
- }
-
- huntKey := addrs.NoKey
- replaceKey := addrs.InstanceKey(addrs.IntKey(0))
- if !countEnabled {
- huntKey, replaceKey = replaceKey, huntKey
- }
-
- is, exists := rs.Instances[huntKey]
- if !exists {
- continue
- }
-
- if _, exists := rs.Instances[replaceKey]; exists {
- // If the replacement key also exists then we'll do nothing and keep both.
- continue
- }
-
- // If we get here then we need to "rename" from hunt to replace
- rs.Instances[replaceKey] = is
- delete(rs.Instances, huntKey)
- changed = true
- }
-
- return changed
-}
-
-// SetResourceInstanceCurrent saves the given instance object as the current
-// generation of the resource instance with the given address, simultaneously
-// updating the recorded provider configuration address, dependencies, and
-// resource EachMode.
-//
-// Any existing current instance object for the given resource is overwritten.
-// Set obj to nil to remove the primary generation object altogether. If there
-// are no deposed objects then the instance as a whole will be removed, which
-// may in turn also remove the containing module if it becomes empty.
-//
-// The caller must ensure that the given ResourceInstanceObject is not
-// concurrently mutated during this call, but may be freely used again once
-// this function returns.
-//
-// The provider address and "each mode" are resource-wide settings and so they
-// are updated for all other instances of the same resource as a side-effect of
-// this call.
-//
-// If the containing module for this resource or the resource itself are not
-// already tracked in state then they will be added as a side-effect.
-func (s *SyncState) SetResourceInstanceCurrent(addr addrs.AbsResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- ms := s.state.EnsureModule(addr.Module)
- ms.SetResourceInstanceCurrent(addr.Resource, obj.DeepCopy(), provider)
- s.maybePruneModule(addr.Module)
-}
-
-// SetResourceInstanceDeposed saves the given instance object as a deposed
-// generation of the resource instance with the given address and deposed key.
-//
-// Call this method only for pre-existing deposed objects that already have
-// a known DeposedKey. For example, this method is useful if reloading objects
-// that were persisted to a state file. To mark the current object as deposed,
-// use DeposeResourceInstanceObject instead.
-//
-// The caller must ensure that the given ResourceInstanceObject is not
-// concurrently mutated during this call, but may be freely used again once
-// this function returns.
-//
-// The resource that contains the given instance must already exist in the
-// state, or this method will panic. Use Resource to check first if its
-// presence is not already guaranteed.
-//
-// Any existing current instance object for the given resource and deposed key
-// is overwritten. Set obj to nil to remove the deposed object altogether. If
-// the instance is left with no objects after this operation then it will
-// be removed from its containing resource altogether.
-// -// If the containing module for this resource or the resource itself are not -// already tracked in state then they will be added as a side-effect. -func (s *SyncState) SetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetResourceInstanceDeposed(addr.Resource, key, obj.DeepCopy(), provider) - s.maybePruneModule(addr.Module) -} - -// DeposeResourceInstanceObject moves the current instance object for the -// given resource instance address into the deposed set, leaving the instance -// without a current object. -// -// The return value is the newly-allocated deposed key, or NotDeposed if the -// given instance is already lacking a current object. -// -// If the containing module for this resource or the resource itself are not -// already tracked in state then there cannot be a current object for the -// given instance, and so NotDeposed will be returned without modifying the -// state at all. -func (s *SyncState) DeposeResourceInstanceObject(addr addrs.AbsResourceInstance) DeposedKey { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return NotDeposed - } - - return ms.deposeResourceInstanceObject(addr.Resource, NotDeposed) -} - -// DeposeResourceInstanceObjectForceKey is like DeposeResourceInstanceObject -// but uses a pre-allocated key. It's the caller's responsibility to ensure -// that there aren't any races to use a particular key; this method will panic -// if the given key is already in use. -func (s *SyncState) DeposeResourceInstanceObjectForceKey(addr addrs.AbsResourceInstance, forcedKey DeposedKey) { - s.lock.Lock() - defer s.lock.Unlock() - - if forcedKey == NotDeposed { - // Usage error: should use DeposeResourceInstanceObject in this case - panic("DeposeResourceInstanceObjectForceKey called without forced key") - } - - ms := s.state.Module(addr.Module) - if ms == nil { - return // Nothing to do, since there can't be any current object either. - } - - ms.deposeResourceInstanceObject(addr.Resource, forcedKey) -} - -// ForgetResourceInstanceAll removes the record of all objects associated with -// the specified resource instance, if present. If not present, this is a no-op. -func (s *SyncState) ForgetResourceInstanceAll(addr addrs.AbsResourceInstance) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return - } - ms.ForgetResourceInstanceAll(addr.Resource) - s.maybePruneModule(addr.Module) -} - -// ForgetResourceInstanceDeposed removes the record of the deposed object with -// the given address and key, if present. If not present, this is a no-op. -func (s *SyncState) ForgetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return - } - ms.ForgetResourceInstanceDeposed(addr.Resource, key) - s.maybePruneModule(addr.Module) -} - -// MaybeRestoreResourceInstanceDeposed will restore the deposed object with the -// given key on the specified resource as the current object for that instance -// if and only if that would not cause us to forget an existing current -// object for that instance. -// -// Returns true if the object was restored to current, or false if no change -// was made at all. 
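The depose/restore pair is easiest to see as a lifecycle: during create-before-destroy, the current object is parked under a deposed key, and it can be promoted back if the replacement fails. A toy sketch with a stand-in instance type; these names are illustrative, not the states package API:

    package main

    import "fmt"

    type instance struct {
    	current string
    	deposed map[string]string
    }

    // depose moves the current object into the deposed set under key k.
    func (i *instance) depose(k string) {
    	if i.current == "" {
    		return
    	}
    	i.deposed[k] = i.current
    	i.current = ""
    }

    // maybeRestore promotes deposed object k back to current, but only if
    // doing so would not overwrite an existing current object.
    func (i *instance) maybeRestore(k string) bool {
    	obj, ok := i.deposed[k]
    	if !ok || i.current != "" {
    		return false
    	}
    	delete(i.deposed, k)
    	i.current = obj
    	return true
    }

    func main() {
    	inst := &instance{current: "old", deposed: map[string]string{}}
    	inst.depose("00000001")
    	// The replacement failed, so roll the deposed object back to current.
    	fmt.Println(inst.maybeRestore("00000001")) // true
    	fmt.Println(inst.current)                  // old
    }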
-func (s *SyncState) MaybeRestoreResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) bool {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- if key == NotDeposed {
- panic("MaybeRestoreResourceInstanceDeposed called without DeposedKey")
- }
-
- ms := s.state.Module(addr.Module)
- if ms == nil {
- // Nothing to do, since the specified deposed object cannot exist.
- return false
- }
-
- return ms.maybeRestoreResourceInstanceDeposed(addr.Resource, key)
-}
-
-// RemovePlannedResourceInstanceObjects removes from the state any resource
-// instance objects that have the status ObjectPlanned, indicating that they
-// are just transient placeholders created during planning.
-//
-// Note that this does not restore any "ready" or "tainted" object that might
-// have been present before the planned object was written. The only real use
-// for this method is in preparing the state created during a refresh walk,
-// where we run the planning step for certain instances just to create enough
-// information to allow correct expression evaluation within provider and
-// data resource blocks. Discarding planned instances in that case is okay
-// because the refresh phase only creates planned objects to stand in for
-// objects that don't exist yet, and thus the planned object must have been
-// absent before by definition.
-func (s *SyncState) RemovePlannedResourceInstanceObjects() {
- // TODO: Merge together the refresh and plan phases into a single walk,
- // so we can remove the need to create this "partial plan" during refresh
- // that we then need to clean up before proceeding.
-
- s.lock.Lock()
- defer s.lock.Unlock()
-
- for _, ms := range s.state.Modules {
- moduleAddr := ms.Addr
-
- for _, rs := range ms.Resources {
- resAddr := rs.Addr.Resource
-
- for ik, is := range rs.Instances {
- instAddr := resAddr.Instance(ik)
-
- if is.Current != nil && is.Current.Status == ObjectPlanned {
- // Setting the current instance to nil removes it from the
- // state altogether if there are not also deposed instances.
- ms.SetResourceInstanceCurrent(instAddr, nil, rs.ProviderConfig)
- }
-
- for dk, obj := range is.Deposed {
- // Deposed objects should never be "planned", but we'll
- // do this anyway for the sake of completeness.
- if obj.Status == ObjectPlanned {
- ms.ForgetResourceInstanceDeposed(instAddr, dk)
- }
- }
- }
- }
-
- // We may have deleted some objects, which means that we may have
- // left a module empty, and so we must prune to preserve the invariant
- // that only the root module is allowed to be empty.
- s.maybePruneModule(moduleAddr)
- }
-}
-
-// Lock acquires an explicit lock on the state, allowing direct read and write
-// access to the returned state object. The caller must call Unlock once
-// access is no longer needed, and then immediately discard the state
-// pointer.
-//
-// Most callers should not use this. Instead, use the concurrency-safe
-// accessors and mutators provided directly on SyncState.
-func (s *SyncState) Lock() *State {
- s.lock.Lock()
- return s.state
-}
-
-// Unlock releases a lock previously acquired by Lock, at which point the
-// caller must cease all use of the state pointer that was returned.
-//
-// Do not call this method except to end an explicit lock acquired by
-// Lock. If a caller calls Unlock without first holding the lock, behavior
-// is undefined.
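A usage sketch of the explicit Lock/Unlock pattern just described, assuming a *states.SyncState value; PruneResourceHusks is a real State method used elsewhere in this patch, but the wrapper function here is hypothetical:

    package example

    import "github.com/hashicorp/terraform/states"

    // pruneWithExplicitLock mutates the underlying State directly while
    // holding the explicit lock, releasing it via the deferred Unlock.
    func pruneWithExplicitLock(ss *states.SyncState) {
    	st := ss.Lock()
    	defer ss.Unlock()
    	// st must not be retained or used after Unlock runs.
    	st.PruneResourceHusks()
    }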
-func (s *SyncState) Unlock() { - s.lock.Unlock() -} - -// maybePruneModule will remove a module from the state altogether if it is -// empty, unless it's the root module which must always be present. -// -// This helper method is not concurrency-safe on its own, so must only be -// called while the caller is already holding the lock for writing. -func (s *SyncState) maybePruneModule(addr addrs.ModuleInstance) { - if addr.IsRoot() { - // We never prune the root. - return - } - - ms := s.state.Module(addr) - if ms == nil { - return - } - - if ms.empty() { - log.Printf("[TRACE] states.SyncState: pruning %s because it is empty", addr) - s.state.RemoveModule(addr) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go deleted file mode 100644 index ebe56542..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/context.go +++ /dev/null @@ -1,886 +0,0 @@ -package terraform - -import ( - "bytes" - "context" - "fmt" - "log" - "sync" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/instances" - "github.com/hashicorp/terraform/lang" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/states/statefile" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// InputMode defines what sort of input will be asked for when Input -// is called on Context. -type InputMode byte - -const ( - // InputModeProvider asks for provider variables - InputModeProvider InputMode = 1 << iota - - // InputModeStd is the standard operating mode and asks for both variables - // and providers. - InputModeStd = InputModeProvider -) - -var ( - // contextFailOnShadowError will cause Context operations to return - // errors when shadow operations fail. This is only used for testing. - contextFailOnShadowError = false - - // contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every - // Plan operation, effectively testing the Diff DeepCopy whenever - // a Plan occurs. This is enabled for tests. - contextTestDeepCopyOnPlan = false -) - -// ContextOpts are the user-configurable options to create a context with -// NewContext. -type ContextOpts struct { - Config *configs.Config - Changes *plans.Changes - State *states.State - Targets []addrs.Targetable - Variables InputValues - Meta *ContextMeta - Destroy bool - - Hooks []Hook - Parallelism int - Providers map[addrs.Provider]providers.Factory - Provisioners map[string]provisioners.Factory - - // If non-nil, will apply as additional constraints on the provider - // plugins that will be requested from the provider resolver. - ProviderSHA256s map[string][]byte - SkipProviderVerify bool - - UIInput UIInput -} - -// ContextMeta is metadata about the running context. This is information -// that this package or structure cannot determine on its own but exposes -// into Terraform in various ways. This must be provided by the Context -// initializer. -type ContextMeta struct { - Env string // Env is the state environment -} - -// Context represents all the context that Terraform needs in order to -// perform operations on infrastructure. This structure is built using -// NewContext. 
-type Context struct { - config *configs.Config - changes *plans.Changes - state *states.State - targets []addrs.Targetable - variables InputValues - meta *ContextMeta - destroy bool - - hooks []Hook - components contextComponentFactory - schemas *Schemas - sh *stopHook - uiInput UIInput - - l sync.Mutex // Lock acquired during any task - parallelSem Semaphore - providerInputConfig map[string]map[string]cty.Value - providerSHA256s map[string][]byte - runLock sync.Mutex - runCond *sync.Cond - runContext context.Context - runContextCancel context.CancelFunc - shadowErr error -} - -// (additional methods on Context can be found in context_*.go files.) - -// NewContext creates a new Context structure. -// -// Once a Context is created, the caller must not access or mutate any of -// the objects referenced (directly or indirectly) by the ContextOpts fields. -// -// If the returned diagnostics contains errors then the resulting context is -// invalid and must not be used. -func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) { - log.Printf("[TRACE] terraform.NewContext: starting") - diags := CheckCoreVersionRequirements(opts.Config) - // If version constraints are not met then we'll bail early since otherwise - // we're likely to just see a bunch of other errors related to - // incompatibilities, which could be overwhelming for the user. - if diags.HasErrors() { - return nil, diags - } - - // Copy all the hooks and add our stop hook. We don't append directly - // to the Config so that we're not modifying that in-place. - sh := new(stopHook) - hooks := make([]Hook, len(opts.Hooks)+1) - copy(hooks, opts.Hooks) - hooks[len(opts.Hooks)] = sh - - state := opts.State - if state == nil { - state = states.NewState() - } - - // Determine parallelism, default to 10. We do this both to limit - // CPU pressure but also to have an extra guard against rate throttling - // from providers. - // We throw an error in case of negative parallelism - par := opts.Parallelism - if par < 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid parallelism value", - fmt.Sprintf("The parallelism must be a positive value. Not %d.", par), - )) - return nil, diags - } - - if par == 0 { - par = 10 - } - - // Set up the variables in the following sequence: - // 0 - Take default values from the configuration - // 1 - Take values from TF_VAR_x environment variables - // 2 - Take values specified in -var flags, overriding values - // set by environment variables if necessary. This includes - // values taken from -var-file in addition. - var variables InputValues - if opts.Config != nil { - // Default variables from the configuration seed our map. - variables = DefaultVariableValues(opts.Config.Module.Variables) - } - // Variables provided by the caller (from CLI, environment, etc) can - // override the defaults. 
- variables = variables.Override(opts.Variables) - - components := &basicComponentFactory{ - providers: opts.Providers, - provisioners: opts.Provisioners, - } - - log.Printf("[TRACE] terraform.NewContext: loading provider schemas") - schemas, err := LoadSchemas(opts.Config, opts.State, components) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Could not load plugin", - fmt.Sprintf(errPluginInit, err), - )) - return nil, diags - } - - changes := opts.Changes - if changes == nil { - changes = plans.NewChanges() - } - - config := opts.Config - if config == nil { - config = configs.NewEmptyConfig() - } - - log.Printf("[TRACE] terraform.NewContext: complete") - - // By the time we get here, we should have values defined for all of - // the root module variables, even if some of them are "unknown". It's the - // caller's responsibility to have already handled the decoding of these - // from the various ways the CLI allows them to be set and to produce - // user-friendly error messages if they are not all present, and so - // the error message from checkInputVariables should never be seen and - // includes language asking the user to report a bug. - if config != nil { - varDiags := checkInputVariables(config.Module.Variables, variables) - diags = diags.Append(varDiags) - } - - return &Context{ - components: components, - schemas: schemas, - destroy: opts.Destroy, - changes: changes, - hooks: hooks, - meta: opts.Meta, - config: config, - state: state, - targets: opts.Targets, - uiInput: opts.UIInput, - variables: variables, - - parallelSem: NewSemaphore(par), - providerInputConfig: make(map[string]map[string]cty.Value), - providerSHA256s: opts.ProviderSHA256s, - sh: sh, - }, diags -} - -func (c *Context) Schemas() *Schemas { - return c.schemas -} - -type ContextGraphOpts struct { - // If true, validates the graph structure (checks for cycles). - Validate bool - - // Legacy graphs only: won't prune the graph - Verbose bool -} - -// Graph returns the graph used for the given operation type. -// -// The most extensive or complex graph type is GraphTypePlan. 
-func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags.Diagnostics) {
- if opts == nil {
- opts = &ContextGraphOpts{Validate: true}
- }
-
- log.Printf("[INFO] terraform: building graph: %s", typ)
- switch typ {
- case GraphTypeApply:
- return (&ApplyGraphBuilder{
- Config: c.config,
- Changes: c.changes,
- State: c.state,
- Components: c.components,
- Schemas: c.schemas,
- Targets: c.targets,
- Destroy: c.destroy,
- Validate: opts.Validate,
- }).Build(addrs.RootModuleInstance)
-
- case GraphTypeValidate:
- // The validate graph is just a slightly modified plan graph
- fallthrough
- case GraphTypePlan:
- // Create the plan graph builder
- p := &PlanGraphBuilder{
- Config: c.config,
- State: c.state,
- Components: c.components,
- Schemas: c.schemas,
- Targets: c.targets,
- Validate: opts.Validate,
- }
-
- // Some special cases for other graph types shared with plan currently
- var b GraphBuilder = p
- switch typ {
- case GraphTypeValidate:
- b = ValidateGraphBuilder(p)
- }
-
- return b.Build(addrs.RootModuleInstance)
-
- case GraphTypePlanDestroy:
- return (&DestroyPlanGraphBuilder{
- Config: c.config,
- State: c.state,
- Components: c.components,
- Schemas: c.schemas,
- Targets: c.targets,
- Validate: opts.Validate,
- }).Build(addrs.RootModuleInstance)
-
- case GraphTypeRefresh:
- return (&RefreshGraphBuilder{
- Config: c.config,
- State: c.state,
- Components: c.components,
- Schemas: c.schemas,
- Targets: c.targets,
- Validate: opts.Validate,
- }).Build(addrs.RootModuleInstance)
-
- case GraphTypeEval:
- return (&EvalGraphBuilder{
- Config: c.config,
- State: c.state,
- Components: c.components,
- Schemas: c.schemas,
- }).Build(addrs.RootModuleInstance)
-
- default:
- // Should never happen, because the above is exhaustive for all graph types.
- panic(fmt.Errorf("unsupported graph type %s", typ))
- }
-}
-
-// ShadowError returns any errors caught during a shadow operation.
-//
-// A shadow operation is an operation run in parallel to a real operation
-// that performs the same tasks using new logic on copied state. The results
-// are compared to ensure that the new logic works the same as the old logic.
-// The shadow never affects the real operation or return values.
-//
-// The results of the shadow operation are only available through this function
-// call after a real operation is complete.
-//
-// For API consumers of Context, you can safely ignore this function
-// completely if you have no interest in helping report experimental feature
-// errors to Terraform maintainers. Otherwise, please call this function
-// after every operation and report this to the user.
-//
-// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
-// the real state or result of a real operation. They are purely informational
-// to assist in future Terraform versions being more stable. Please message
-// this effectively to the end user.
-//
-// This must be called only when no other operation is running (refresh,
-// plan, etc.). The result can be used in parallel to any other operation
-// running.
-func (c *Context) ShadowError() error {
- return c.shadowErr
-}
-
-// State returns a copy of the current state associated with this context.
-//
-// This cannot safely be called in parallel with any other Context function.
-func (c *Context) State() *states.State {
- return c.state.DeepCopy()
-}
-
-// Eval produces a scope in which expressions can be evaluated for
-// the given module path.
-// -// This method must first evaluate any ephemeral values (input variables, local -// values, and output values) in the configuration. These ephemeral values are -// not included in the persisted state, so they must be re-computed using other -// values in the state before they can be properly evaluated. The updated -// values are retained in the main state associated with the receiving context. -// -// This function takes no action against remote APIs but it does need access -// to all provider and provisioner instances in order to obtain their schemas -// for type checking. -// -// The result is an evaluation scope that can be used to resolve references -// against the root module. If the returned diagnostics contains errors then -// the returned scope may be nil. If it is not nil then it may still be used -// to attempt expression evaluation or other analysis, but some expressions -// may not behave as expected. -func (c *Context) Eval(path addrs.ModuleInstance) (*lang.Scope, tfdiags.Diagnostics) { - // This is intended for external callers such as the "terraform console" - // command. Internally, we create an evaluator in c.walk before walking - // the graph, and create scopes in ContextGraphWalker. - - var diags tfdiags.Diagnostics - defer c.acquireRun("eval")() - - // Start with a copy of state so that we don't affect any instances - // that other methods may have already returned. - c.state = c.state.DeepCopy() - var walker *ContextGraphWalker - - graph, graphDiags := c.Graph(GraphTypeEval, nil) - diags = diags.Append(graphDiags) - if !diags.HasErrors() { - var walkDiags tfdiags.Diagnostics - walker, walkDiags = c.walk(graph, walkEval) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - } - - if walker == nil { - // If we skipped walking the graph (due to errors) then we'll just - // use a placeholder graph walker here, which'll refer to the - // unmodified state. - walker = c.graphWalker(walkEval) - } - - // This is a bit weird since we don't normally evaluate outside of - // the context of a walk, but we'll "re-enter" our desired path here - // just to get hold of an EvalContext for it. GraphContextBuiltin - // caches its contexts, so we should get hold of the context that was - // previously used for evaluation here, unless we skipped walking. - evalCtx := walker.EnterPath(path) - return evalCtx.EvaluationScope(nil, EvalDataForNoInstanceKey), diags -} - -// Apply applies the changes represented by this context and returns -// the resulting state. -// -// Even in the case an error is returned, the state may be returned and will -// potentially be partially updated. In addition to returning the resulting -// state, this context is updated with the latest state. -// -// If the state is required after an error, the caller should call -// Context.State, rather than rely on the return value. -// -// TODO: Apply and Refresh should either always return a state, or rely on the -// State() method. Currently the helper/resource testing framework relies -// on the absence of a returned state to determine if Destroy can be -// called, so that will need to be refactored before this can be changed. -func (c *Context) Apply() (*states.State, tfdiags.Diagnostics) { - defer c.acquireRun("apply")() - - // Copy our own state - c.state = c.state.DeepCopy() - - // Build the graph. 
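
// (Sketch of how an external caller, such as the "terraform console" command
// mentioned above, might use Eval; hclsyntax, hcl, and cty imports are
// assumed and the expression source is illustrative. Apply's graph build
// resumes below.)
func evalConsoleExpr(c *terraform.Context, src string) (cty.Value, tfdiags.Diagnostics) {
	scope, diags := c.Eval(addrs.RootModuleInstance)
	if scope == nil {
		return cty.NilVal, diags
	}
	expr, hclDiags := hclsyntax.ParseExpression([]byte(src), "<console>", hcl.InitialPos)
	diags = diags.Append(hclDiags)
	if hclDiags.HasErrors() {
		return cty.NilVal, diags
	}
	val, evalDiags := scope.EvalExpr(expr, cty.DynamicPseudoType)
	return val, diags.Append(evalDiags)
}
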
- graph, diags := c.Graph(GraphTypeApply, nil) - if diags.HasErrors() { - return nil, diags - } - - // Determine the operation - operation := walkApply - if c.destroy { - operation = walkDestroy - } - - // Walk the graph - walker, walkDiags := c.walk(graph, operation) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - - if c.destroy && !diags.HasErrors() { - // If we know we were trying to destroy objects anyway, and we - // completed without any errors, then we'll also prune out any - // leftover empty resource husks (left after all of the instances - // of a resource with "count" or "for_each" are destroyed) to - // help ensure we end up with an _actually_ empty state, assuming - // we weren't destroying with -target here. - // - // (This doesn't actually take into account -target, but that should - // be okay because it doesn't throw away anything we can't recompute - // on a subsequent "terraform plan" run, if the resources are still - // present in the configuration. However, this _will_ cause "count = 0" - // resources to read as unknown during the next refresh walk, which - // may cause some additional churn if used in a data resource or - // provider block, until we remove refreshing as a separate walk and - // just do it as part of the plan walk.) - c.state.PruneResourceHusks() - } - - if len(c.targets) > 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Applied changes may be incomplete", - `The plan was created with the -target option in effect, so some changes requested in the configuration may have been ignored and the output values may not be fully updated. Run the following command to verify that no other changes are pending: - terraform plan - -Note that the -target option is not suitable for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, - )) - } - - return c.state, diags -} - -// Plan generates an execution plan for the given context. -// -// The execution plan encapsulates the context and can be stored -// in order to reinstantiate a context later for Apply. -// -// Plan also updates the diff of this context to be the diff generated -// by the plan, so Apply can be called after. -func (c *Context) Plan() (*plans.Plan, tfdiags.Diagnostics) { - defer c.acquireRun("plan")() - c.changes = plans.NewChanges() - - var diags tfdiags.Diagnostics - - if len(c.targets) > 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Resource targeting is in effect", - `You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration. - -The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, - )) - } - - varVals := make(map[string]plans.DynamicValue, len(c.variables)) - for k, iv := range c.variables { - // We use cty.DynamicPseudoType here so that we'll save both the - // value _and_ its dynamic type in the plan, so we can recover - // exactly the same value later. 
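
// (Round-trip sketch of the serialization used just below: encoding with
// cty.DynamicPseudoType stores the value's own type alongside the value, so
// decoding recovers exactly the same value, element type and all. The list
// value here is illustrative.)
dv, err := plans.NewDynamicValue(
	cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
	cty.DynamicPseudoType,
)
if err == nil {
	restored, _ := dv.Decode(cty.DynamicPseudoType)
	_ = restored // a list of strings again, with cty.List(cty.String) intact
}
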
- dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to prepare variable value for plan", - fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err), - )) - continue - } - varVals[k] = dv - } - - p := &plans.Plan{ - VariableValues: varVals, - TargetAddrs: c.targets, - ProviderSHA256s: c.providerSHA256s, - } - - var operation walkOperation - if c.destroy { - operation = walkPlanDestroy - } else { - // Set our state to be something temporary. We do this so that - // the plan can update a fake state so that variables work, then - // we replace it back with our old state. - old := c.state - if old == nil { - c.state = states.NewState() - } else { - c.state = old.DeepCopy() - } - defer func() { - c.state = old - }() - - operation = walkPlan - } - - // Build the graph. - graphType := GraphTypePlan - if c.destroy { - graphType = GraphTypePlanDestroy - } - graph, graphDiags := c.Graph(graphType, nil) - diags = diags.Append(graphDiags) - if graphDiags.HasErrors() { - return nil, diags - } - - // Do the walk - walker, walkDiags := c.walk(graph, operation) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return nil, diags - } - p.Changes = c.changes - - return p, diags -} - -// Refresh goes through all the resources in the state and refreshes them -// to their latest state. This will update the state that this context -// works with, along with returning it. -// -// Even in the case an error is returned, the state may be returned and -// will potentially be partially updated. -func (c *Context) Refresh() (*states.State, tfdiags.Diagnostics) { - defer c.acquireRun("refresh")() - - // Copy our own state - c.state = c.state.DeepCopy() - - // Refresh builds a partial changeset as part of its work because it must - // create placeholder stubs for any resource instances that'll be created - // in subsequent plan so that provider configurations and data resources - // can interpolate from them. This plan is always thrown away after - // the operation completes, restoring any existing changeset. - oldChanges := c.changes - defer func() { c.changes = oldChanges }() - c.changes = plans.NewChanges() - - // Build the graph. - graph, diags := c.Graph(GraphTypeRefresh, nil) - if diags.HasErrors() { - return nil, diags - } - - // Do the walk - _, walkDiags := c.walk(graph, walkRefresh) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return nil, diags - } - - // During our walk we will have created planned object placeholders in - // state for resource instances that are in configuration but not yet - // created. These were created only to allow expression evaluation to - // work properly in provider and data blocks during the walk and must - // now be discarded, since a subsequent plan walk is responsible for - // creating these "for real". - // TODO: Consolidate refresh and plan into a single walk, so that the - // refresh walk doesn't need to emulate various aspects of the plan - // walk in order to properly evaluate provider and data blocks. - c.state.SyncWrapper().RemovePlannedResourceInstanceObjects() - - return c.state, diags -} - -// Stop stops the running task. -// -// Stop will block until the task completes. 
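
// (Sketch: wiring an interrupt signal to the Stop implementation below, as a
// CLI front end might; os and os/signal imports assumed. Stop returns only
// once the in-flight operation has wound down.)
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, os.Interrupt)
go func() {
	<-sigCh
	c.Stop() // blocks until the running walk completes
}()
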
-func (c *Context) Stop() {
-	log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")
-
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	// If we're running, then stop
-	if c.runContextCancel != nil {
-		log.Printf("[WARN] terraform: run context exists, stopping")
-
-		// Tell the hook we want to stop
-		c.sh.Stop()
-
-		// Stop the context
-		c.runContextCancel()
-		c.runContextCancel = nil
-	}
-
-	// Grab the condition var before we exit
-	if cond := c.runCond; cond != nil {
-		log.Printf("[INFO] terraform: waiting for graceful stop to complete")
-		cond.Wait()
-	}
-
-	log.Printf("[WARN] terraform: stop complete")
-}
-
-// Validate performs semantic validation of the configuration, returning
-// any warnings or errors.
-//
-// Syntax and structural checks are performed by the configuration loader,
-// and so are not repeated here.
-func (c *Context) Validate() tfdiags.Diagnostics {
-	defer c.acquireRun("validate")()
-
-	var diags tfdiags.Diagnostics
-
-	// If we have errors at this point then we probably won't be able to
-	// construct a graph without producing redundant errors, so we'll halt early.
-	if diags.HasErrors() {
-		return diags
-	}
-
-	// Build the graph so we can walk it and run Validate on nodes.
-	// We also validate the graph generated here, but this graph doesn't
-	// necessarily match the graph that Plan will generate, so we'll validate the
-	// graph again later after Planning.
-	graph, graphDiags := c.Graph(GraphTypeValidate, nil)
-	diags = diags.Append(graphDiags)
-	if graphDiags.HasErrors() {
-		return diags
-	}
-
-	// Walk
-	walker, walkDiags := c.walk(graph, walkValidate)
-	diags = diags.Append(walker.NonFatalDiagnostics)
-	diags = diags.Append(walkDiags)
-	if walkDiags.HasErrors() {
-		return diags
-	}
-
-	return diags
-}
-
-// Config returns the configuration tree associated with this context.
-func (c *Context) Config() *configs.Config {
-	return c.config
-}
-
-// Variables will return the mapping of variables that were defined
-// for this Context. If Input was called, this mapping may be different
-// than what was given.
-func (c *Context) Variables() InputValues {
-	return c.variables
-}
-
-// SetVariable sets a variable after a context has already been built.
-func (c *Context) SetVariable(k string, v cty.Value) {
-	c.variables[k] = &InputValue{
-		Value:      v,
-		SourceType: ValueFromCaller,
-	}
-}
-
-func (c *Context) acquireRun(phase string) func() {
-	// With the run lock held, grab the context lock to make changes
-	// to the run context.
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	// Wait until we're no longer running
-	for c.runCond != nil {
-		c.runCond.Wait()
-	}
-
-	// Build our lock
-	c.runCond = sync.NewCond(&c.l)
-
-	// Create a new run context
-	c.runContext, c.runContextCancel = context.WithCancel(context.Background())
-
-	// Reset the stop hook so we're not stopped
-	c.sh.Reset()
-
-	// Reset the shadow errors
-	c.shadowErr = nil
-
-	return c.releaseRun
-}
-
-func (c *Context) releaseRun() {
-	// Grab the context lock so that we can make modifications to fields
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	// End our run.
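
// (Standalone sketch of the acquire/release pattern used by acquireRun above
// and the releaseRun in progress here: a sync.Cond gate that serializes
// operations. The runGate name is illustrative; only the sync import is
// needed.)
type runGate struct {
	mu   sync.Mutex
	cond *sync.Cond
}

func (g *runGate) acquire() func() {
	g.mu.Lock()
	defer g.mu.Unlock()
	for g.cond != nil { // wait until no operation is running
		g.cond.Wait()
	}
	g.cond = sync.NewCond(&g.mu)
	return g.release
}

func (g *runGate) release() {
	g.mu.Lock()
	defer g.mu.Unlock()
	cond := g.cond
	g.cond = nil
	cond.Broadcast() // wake every acquirer waiting on the old condition
}
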
-	// We check if runContext is non-nil because it can be
-	// set to nil if it was cancelled via Stop()
-	if c.runContextCancel != nil {
-		c.runContextCancel()
-	}
-
-	// Unlock all waiting on our condition
-	cond := c.runCond
-	c.runCond = nil
-	cond.Broadcast()
-
-	// Unset the context
-	c.runContext = nil
-}
-
-func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, tfdiags.Diagnostics) {
-	log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
-
-	walker := c.graphWalker(operation)
-
-	// Watch for a stop so we can call the provider Stop() API.
-	watchStop, watchWait := c.watchStop(walker)
-
-	// Walk the real graph; this will block until it completes
-	diags := graph.Walk(walker)
-
-	// Close the channel so the watcher stops, and wait for it to return.
-	close(watchStop)
-	<-watchWait
-
-	return walker, diags
-}
-
-func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker {
-	return &ContextGraphWalker{
-		Context:            c,
-		State:              c.state.SyncWrapper(),
-		Changes:            c.changes.SyncWrapper(),
-		InstanceExpander:   instances.NewExpander(),
-		Operation:          operation,
-		StopContext:        c.runContext,
-		RootVariableValues: c.variables,
-	}
-}
-
-// watchStop immediately returns a `stop` and a `wait` chan after dispatching
-// the watchStop goroutine. This will watch the runContext for cancellation and
-// stop the providers accordingly. When the watch is no longer needed, the
-// `stop` chan should be closed before waiting on the `wait` chan.
-// The `wait` chan is important, because without synchronizing with the end of
-// the watchStop goroutine, the runContext may also be closed during the select,
-// incorrectly causing providers to be stopped. Even if the graph walk is done
-// at that point, stopping a provider permanently cancels its StopContext which
-// can cause later actions to fail.
-func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) {
-	stop := make(chan struct{})
-	wait := make(chan struct{})
-
-	// get the runContext cancellation channel now, because releaseRun will
-	// write to the runContext field.
-	done := c.runContext.Done()
-
-	go func() {
-		defer close(wait)
-		// Wait for a stop or completion
-		select {
-		case <-done:
-			// done means the context was canceled, so we need to try and stop
-			// providers.
-		case <-stop:
-			// our own stop channel was closed.
-			return
-		}
-
-		// If we're here, we're stopped, trigger the call.
-		log.Printf("[TRACE] Context: requesting providers and provisioners to gracefully stop")
-
-		{
-			// Copy the providers so that a misbehaved blocking Stop doesn't
-			// completely hang Terraform.
-			walker.providerLock.Lock()
-			ps := make([]providers.Interface, 0, len(walker.providerCache))
-			for _, p := range walker.providerCache {
-				ps = append(ps, p)
-			}
-			defer walker.providerLock.Unlock()
-
-			for _, p := range ps {
-				// We ignore the error for now since there isn't any reasonable
-				// action to take if there is an error here, since the stop is still
-				// advisory: Terraform will exit once the graph node completes.
-				p.Stop()
-			}
-		}
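
// (Standalone sketch of the stop/wait channel pattern documented above: the
// watcher fires onStop only on cancellation, and closing stop then receiving
// from wait guarantees the goroutine can no longer act on a stale context.
// Names are illustrative; only the context import is needed.)
func watchCancel(ctx context.Context, onStop func()) (stop chan struct{}, wait <-chan struct{}) {
	stopCh := make(chan struct{})
	waitCh := make(chan struct{})
	go func() {
		defer close(waitCh)
		select {
		case <-ctx.Done():
			onStop() // cancelled: ask workers to stop
		case <-stopCh:
			// watch no longer needed; do nothing
		}
	}()
	return stopCh, waitCh
}
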
-
-		{
-			// Call stop on all the provisioners
-			walker.provisionerLock.Lock()
-			ps := make([]provisioners.Interface, 0, len(walker.provisionerCache))
-			for _, p := range walker.provisionerCache {
-				ps = append(ps, p)
-			}
-			defer walker.provisionerLock.Unlock()
-
-			for _, p := range ps {
-				// We ignore the error for now since there isn't any reasonable
-				// action to take if there is an error here, since the stop is still
-				// advisory: Terraform will exit once the graph node completes.
-				p.Stop()
-			}
-		}
-	}()
-
-	return stop, wait
-}
-
-// ShimLegacyState is a helper that takes the legacy state type and
-// converts it to the new state type.
-//
-// This is implemented as a state file upgrade, so it will not preserve
-// parts of the state structure that are not included in a serialized state,
-// such as the resolved results of any local values, outputs in non-root
-// modules, etc.
-func ShimLegacyState(legacy *State) (*states.State, error) {
-	if legacy == nil {
-		return nil, nil
-	}
-	var buf bytes.Buffer
-	err := WriteState(legacy, &buf)
-	if err != nil {
-		return nil, err
-	}
-	f, err := statefile.Read(&buf)
-	if err != nil {
-		return nil, err
-	}
-	return f.State, err
-}
-
-// MustShimLegacyState is a wrapper around ShimLegacyState that panics if
-// the conversion does not succeed. This is primarily intended for tests where
-// the given legacy state is an object constructed within the test.
-func MustShimLegacyState(legacy *State) *states.State {
-	ret, err := ShimLegacyState(legacy)
-	if err != nil {
-		panic(err)
-	}
-	return ret
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
deleted file mode 100644
index c893a16b..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/context_components.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package terraform
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/terraform/addrs"
-	"github.com/hashicorp/terraform/providers"
-	"github.com/hashicorp/terraform/provisioners"
-)
-
-// contextComponentFactory is the interface that Context uses
-// to initialize various components such as providers and provisioners.
-// This factory gets more information than the raw maps used to initialize
-// a Context. This information is used for debugging.
-type contextComponentFactory interface {
-	// ResourceProvider creates a new ResourceProvider with the given type.
-	ResourceProvider(typ addrs.Provider) (providers.Interface, error)
-	ResourceProviders() []string
-
-	// ResourceProvisioner creates a new ResourceProvisioner with the given
-	// type.
-	ResourceProvisioner(typ string) (provisioners.Interface, error)
-	ResourceProvisioners() []string
-}
-
-// basicComponentFactory just calls a factory from a map directly.
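
// (Before the basicComponentFactory implementation below: a sketch of how a
// test might use the shims above. The legacy *State value is assumed to be
// constructed elsewhere in the test, and the testing import is assumed.)
func upgradeLegacyForTest(t *testing.T, legacy *State) *states.State {
	st, err := ShimLegacyState(legacy)
	if err != nil {
		t.Fatalf("state shim failed: %s", err)
	}
	// st is nil when legacy is nil, per the guard in ShimLegacyState;
	// MustShimLegacyState(legacy) is the panicking shorthand for tests.
	return st
}
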
-type basicComponentFactory struct { - providers map[addrs.Provider]providers.Factory - provisioners map[string]ProvisionerFactory -} - -func (c *basicComponentFactory) ResourceProviders() []string { - var result []string - for k := range c.providers { - result = append(result, k.String()) - } - return result -} - -func (c *basicComponentFactory) ResourceProvisioners() []string { - var result []string - for k := range c.provisioners { - result = append(result, k) - } - - return result -} - -func (c *basicComponentFactory) ResourceProvider(typ addrs.Provider) (providers.Interface, error) { - f, ok := c.providers[typ] - if !ok { - return nil, fmt.Errorf("unknown provider %q", typ.String()) - } - - return f() -} - -func (c *basicComponentFactory) ResourceProvisioner(typ string) (provisioners.Interface, error) { - f, ok := c.provisioners[typ] - if !ok { - return nil, fmt.Errorf("unknown provisioner %q", typ) - } - - return f() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go deleted file mode 100644 index 4448d870..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go +++ /dev/null @@ -1,32 +0,0 @@ -package terraform - -//go:generate go run golang.org/x/tools/cmd/stringer -type=GraphType context_graph_type.go - -// GraphType is an enum of the type of graph to create with a Context. -// The values of the constants may change so they shouldn't be depended on; -// always use the constant name. -type GraphType byte - -const ( - GraphTypeInvalid GraphType = 0 - GraphTypeLegacy GraphType = iota - GraphTypeRefresh - GraphTypePlan - GraphTypePlanDestroy - GraphTypeApply - GraphTypeValidate - GraphTypeEval // only visits in-memory elements such as variables, locals, and outputs. -) - -// GraphTypeMap is a mapping of human-readable string to GraphType. This -// is useful to use as the mechanism for human input for configurable -// graph types. -var GraphTypeMap = map[string]GraphType{ - "apply": GraphTypeApply, - "plan": GraphTypePlan, - "plan-destroy": GraphTypePlanDestroy, - "refresh": GraphTypeRefresh, - "legacy": GraphTypeLegacy, - "validate": GraphTypeValidate, - "eval": GraphTypeEval, -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go deleted file mode 100644 index 4a35c29d..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/context_import.go +++ /dev/null @@ -1,70 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// ImportOpts are used as the configuration for Import. -type ImportOpts struct { - // Targets are the targets to import - Targets []*ImportTarget -} - -// ImportTarget is a single resource to import. -type ImportTarget struct { - // Addr is the address for the resource instance that the new object should - // be imported into. - Addr addrs.AbsResourceInstance - - // ID is the ID of the resource to import. This is resource-specific. - ID string - - // ProviderAddr is the address of the provider that should handle the import. - ProviderAddr addrs.AbsProviderConfig -} - -// Import takes already-created external resources and brings them -// under Terraform management. Import requires the exact type, name, and ID -// of the resources to import. -// -// This operation is idempotent. 
If the requested resource is already -// imported, no changes are made to the state. -// -// Further, this operation also gracefully handles partial state. If during -// an import there is a failure, all previously imported resources remain -// imported. -func (c *Context) Import(opts *ImportOpts) (*states.State, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Hold a lock since we can modify our own state here - defer c.acquireRun("import")() - - // Copy our own state - c.state = c.state.DeepCopy() - - // Initialize our graph builder - builder := &ImportGraphBuilder{ - ImportTargets: opts.Targets, - Config: c.config, - Components: c.components, - Schemas: c.schemas, - } - - // Build the graph! - graph, graphDiags := builder.Build(addrs.RootModuleInstance) - diags = diags.Append(graphDiags) - if graphDiags.HasErrors() { - return c.state, diags - } - - // Walk it - _, walkDiags := c.walk(graph, walkImport) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return c.state, diags - } - - return c.state, diags -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_input.go b/vendor/github.com/hashicorp/terraform/terraform/context_input.go deleted file mode 100644 index 305bae77..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/context_input.go +++ /dev/null @@ -1,190 +0,0 @@ -package terraform - -import ( - "context" - "log" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/tfdiags" -) - -// Input asks for input to fill unset required arguments in provider -// configurations. -// -// This modifies the configuration in-place, so asking for Input twice -// may result in different UI output showing different current values. -func (c *Context) Input(mode InputMode) tfdiags.Diagnostics { - // This function used to be responsible for more than it is now, so its - // interface is more general than its current functionality requires. - // It now exists only to handle interactive prompts for provider - // configurations, with other prompts the responsibility of the CLI - // layer prior to calling in to this package. - // - // (Hopefully in future the remaining functionality here can move to the - // CLI layer too in order to avoid this odd situation where core code - // produces UI input prompts.) - - var diags tfdiags.Diagnostics - defer c.acquireRun("input")() - - if c.uiInput == nil { - log.Printf("[TRACE] Context.Input: uiInput is nil, so skipping") - return diags - } - - ctx := context.Background() - - if mode&InputModeProvider != 0 { - log.Printf("[TRACE] Context.Input: Prompting for provider arguments") - - // We prompt for input only for provider configurations defined in - // the root module. At the time of writing that is an arbitrary - // restriction, but we have future plans to support "count" and - // "for_each" on modules that will then prevent us from supporting - // input for child module configurations anyway (since we'd need to - // dynamic-expand first), and provider configurations in child modules - // are not recommended since v0.11 anyway, so this restriction allows - // us to keep this relatively simple without significant hardship. 
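
// (Sketch of calling the Import entry point above from outside the package,
// before the provider-prompt loop resumes below; the address string and
// instance ID are illustrative, and addrs.ParseAbsResourceInstanceStr is
// assumed available for parsing the resource address.)
addr, addrDiags := addrs.ParseAbsResourceInstanceStr("aws_instance.web")
if !addrDiags.HasErrors() {
	newState, diags := c.Import(&terraform.ImportOpts{
		Targets: []*terraform.ImportTarget{
			{Addr: addr, ID: "i-0123456789abcdef0"},
		},
	})
	_, _ = newState, diags
}
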
-
-		pcs := make(map[string]*configs.Provider)
-		pas := make(map[string]addrs.LocalProviderConfig)
-		for _, pc := range c.config.Module.ProviderConfigs {
-			addr := pc.Addr()
-			pcs[addr.String()] = pc
-			pas[addr.String()] = addr
-			log.Printf("[TRACE] Context.Input: Provider %s declared at %s", addr, pc.DeclRange)
-		}
-		// We also need to detect _implied_ provider configs from resources.
-		// These won't have *configs.Provider objects, but they will still
-		// exist in the map and we'll just treat them as empty below.
-		for _, rc := range c.config.Module.ManagedResources {
-			pa := rc.ProviderConfigAddr()
-			if pa.Alias != "" {
-				continue // alias configurations cannot be implied
-			}
-			if _, exists := pcs[pa.String()]; !exists {
-				pcs[pa.String()] = nil
-				pas[pa.String()] = pa
-				log.Printf("[TRACE] Context.Input: Provider %s implied by resource block at %s", pa, rc.DeclRange)
-			}
-		}
-		for _, rc := range c.config.Module.DataResources {
-			pa := rc.ProviderConfigAddr()
-			if pa.Alias != "" {
-				continue // alias configurations cannot be implied
-			}
-			if _, exists := pcs[pa.String()]; !exists {
-				pcs[pa.String()] = nil
-				pas[pa.String()] = pa
-				log.Printf("[TRACE] Context.Input: Provider %s implied by data block at %s", pa, rc.DeclRange)
-			}
-		}
-
-		for pk, pa := range pas {
-			pc := pcs[pk] // will be nil if this is an implied config
-
-			// Wrap the input into a namespace
-			input := &PrefixUIInput{
-				IdPrefix:    pk,
-				QueryPrefix: pk + ".",
-				UIInput:     c.uiInput,
-			}
-
-			providerFqn := c.config.Module.ProviderForLocalConfig(pa)
-			schema := c.schemas.ProviderConfig(providerFqn)
-			if schema == nil {
-				// Could either be an incorrect config or just an incomplete
-				// mock in tests. We'll let a later pass decide, and just
-				// ignore this for the purposes of gathering input.
-				log.Printf("[TRACE] Context.Input: No schema available for provider type %q", pa.LocalName)
-				continue
-			}
-
-			// For our purposes here we just want to detect if attributes are
-			// set in config at all, so rather than doing a full decode
-			// (which would require us to prepare an evalcontext, etc) we'll
-			// use the low-level HCL API to process only the top-level
-			// structure.
- var attrExprs hcl.Attributes // nil if there is no config - if pc != nil && pc.Config != nil { - lowLevelSchema := schemaForInputSniffing(hcldec.ImpliedSchema(schema.DecoderSpec())) - content, _, diags := pc.Config.PartialContent(lowLevelSchema) - if diags.HasErrors() { - log.Printf("[TRACE] Context.Input: %s has decode error, so ignoring: %s", pa, diags.Error()) - continue - } - attrExprs = content.Attributes - } - - keys := make([]string, 0, len(schema.Attributes)) - for key := range schema.Attributes { - keys = append(keys, key) - } - sort.Strings(keys) - - vals := map[string]cty.Value{} - for _, key := range keys { - attrS := schema.Attributes[key] - if attrS.Optional { - continue - } - if attrExprs != nil { - if _, exists := attrExprs[key]; exists { - continue - } - } - if !attrS.Type.Equals(cty.String) { - continue - } - - log.Printf("[TRACE] Context.Input: Prompting for %s argument %s", pa, key) - rawVal, err := input.Input(ctx, &InputOpts{ - Id: key, - Query: key, - Description: attrS.Description, - }) - if err != nil { - log.Printf("[TRACE] Context.Input: Failed to prompt for %s argument %s: %s", pa, key, err) - continue - } - - vals[key] = cty.StringVal(rawVal) - } - - absConfigAddr := addrs.AbsProviderConfig{ - Provider: providerFqn, - Alias: pa.Alias, - Module: c.Config().Path, - } - c.providerInputConfig[absConfigAddr.String()] = vals - - log.Printf("[TRACE] Context.Input: Input for %s: %#v", pk, vals) - } - } - - return diags -} - -// schemaForInputSniffing returns a transformed version of a given schema -// that marks all attributes as optional, which the Context.Input method can -// use to detect whether a required argument is set without missing arguments -// themselves generating errors. -func schemaForInputSniffing(schema *hcl.BodySchema) *hcl.BodySchema { - ret := &hcl.BodySchema{ - Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), - Blocks: schema.Blocks, - } - - for i, attrS := range schema.Attributes { - ret.Attributes[i] = attrS - ret.Attributes[i].Required = false - } - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go deleted file mode 100644 index 4e834204..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/diff.go +++ /dev/null @@ -1,1451 +0,0 @@ -package terraform - -import ( - "bufio" - "bytes" - "fmt" - "log" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "sync" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/zclconf/go-cty/cty" - - "github.com/mitchellh/copystructure" -) - -// DiffChangeType is an enum with the kind of changes a diff has planned. -type DiffChangeType byte - -const ( - DiffInvalid DiffChangeType = iota - DiffNone - DiffCreate - DiffUpdate - DiffDestroy - DiffDestroyCreate - - // DiffRefresh is only used in the UI for displaying diffs. - // Managed resource reads never appear in plan, and when data source - // reads appear they are represented as DiffCreate in core before - // transforming to DiffRefresh in the UI layer. - DiffRefresh // TODO: Actually use DiffRefresh in core too, for less confusion -) - -// multiVal matches the index key to a flatmapped set, list or map -var multiVal = regexp.MustCompile(`\.(#|%)$`) - -// Diff tracks the changes that are necessary to apply a configuration -// to an existing infrastructure. 
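
// (Standalone sketch of the "input sniffing" trick above, before the Diff
// type below: PartialContent with an all-optional schema reports which
// attributes are set without raising errors for the ones that are missing.
// The file name and attribute names are illustrative; hclparse and hcl
// imports are assumed.)
parser := hclparse.NewParser()
f, parseDiags := parser.ParseHCL([]byte(`region = "us-west-2"`), "provider.tf")
if !parseDiags.HasErrors() {
	sniff := &hcl.BodySchema{
		// AttributeSchema.Required defaults to false, which is the state
		// schemaForInputSniffing forces for every attribute.
		Attributes: []hcl.AttributeSchema{{Name: "region"}, {Name: "profile"}},
	}
	content, _, _ := f.Body.PartialContent(sniff)
	_, regionSet := content.Attributes["region"]   // true: present in config
	_, profileSet := content.Attributes["profile"] // false: would be prompted for
	_, _ = regionSet, profileSet
}
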
-type Diff struct { - // Modules contains all the modules that have a diff - Modules []*ModuleDiff -} - -// Prune cleans out unused structures in the diff without affecting -// the behavior of the diff at all. -// -// This is not safe to call concurrently. This is safe to call on a -// nil Diff. -func (d *Diff) Prune() { - if d == nil { - return - } - - // Prune all empty modules - newModules := make([]*ModuleDiff, 0, len(d.Modules)) - for _, m := range d.Modules { - // If the module isn't empty, we keep it - if !m.Empty() { - newModules = append(newModules, m) - } - } - if len(newModules) == 0 { - newModules = nil - } - d.Modules = newModules -} - -// AddModule adds the module with the given path to the diff. -// -// This should be the preferred method to add module diffs since it -// allows us to optimize lookups later as well as control sorting. -func (d *Diff) AddModule(path addrs.ModuleInstance) *ModuleDiff { - // Lower the new-style address into a legacy-style address. - // This requires that none of the steps have instance keys, which is - // true for all addresses at the time of implementing this because - // "count" and "for_each" are not yet implemented for modules. - legacyPath := make([]string, len(path)) - for i, step := range path { - if step.InstanceKey != addrs.NoKey { - // FIXME: Once the rest of Terraform is ready to use count and - // for_each, remove all of this and just write the addrs.ModuleInstance - // value itself into the ModuleState. - panic("diff cannot represent modules with count or for_each keys") - } - - legacyPath[i] = step.Name - } - - m := &ModuleDiff{Path: legacyPath} - m.init() - d.Modules = append(d.Modules, m) - return m -} - -// ModuleByPath is used to lookup the module diff for the given path. -// This should be the preferred lookup mechanism as it allows for future -// lookup optimizations. -func (d *Diff) ModuleByPath(path addrs.ModuleInstance) *ModuleDiff { - if d == nil { - return nil - } - for _, mod := range d.Modules { - if mod.Path == nil { - panic("missing module path") - } - modPath := normalizeModulePath(mod.Path) - if modPath.String() == path.String() { - return mod - } - } - return nil -} - -// RootModule returns the ModuleState for the root module -func (d *Diff) RootModule() *ModuleDiff { - root := d.ModuleByPath(addrs.RootModuleInstance) - if root == nil { - panic("missing root module") - } - return root -} - -// Empty returns true if the diff has no changes. -func (d *Diff) Empty() bool { - if d == nil { - return true - } - - for _, m := range d.Modules { - if !m.Empty() { - return false - } - } - - return true -} - -// Equal compares two diffs for exact equality. -// -// This is different from the Same comparison that is supported which -// checks for operation equality taking into account computed values. Equal -// instead checks for exact equality. -func (d *Diff) Equal(d2 *Diff) bool { - // If one is nil, they must both be nil - if d == nil || d2 == nil { - return d == d2 - } - - // Sort the modules - sort.Sort(moduleDiffSort(d.Modules)) - sort.Sort(moduleDiffSort(d2.Modules)) - - // Copy since we have to modify the module destroy flag to false so - // we don't compare that. TODO: delete this when we get rid of the - // destroy flag on modules. 
- dCopy := d.DeepCopy() - d2Copy := d2.DeepCopy() - for _, m := range dCopy.Modules { - m.Destroy = false - } - for _, m := range d2Copy.Modules { - m.Destroy = false - } - - // Use DeepEqual - return reflect.DeepEqual(dCopy, d2Copy) -} - -// DeepCopy performs a deep copy of all parts of the Diff, making the -// resulting Diff safe to use without modifying this one. -func (d *Diff) DeepCopy() *Diff { - copy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - panic(err) - } - - return copy.(*Diff) -} - -func (d *Diff) String() string { - var buf bytes.Buffer - - keys := make([]string, 0, len(d.Modules)) - lookup := make(map[string]*ModuleDiff) - for _, m := range d.Modules { - addr := normalizeModulePath(m.Path) - key := addr.String() - keys = append(keys, key) - lookup[key] = m - } - sort.Strings(keys) - - for _, key := range keys { - m := lookup[key] - mStr := m.String() - - // If we're the root module, we just write the output directly. - if reflect.DeepEqual(m.Path, rootModulePath) { - buf.WriteString(mStr + "\n") - continue - } - - buf.WriteString(fmt.Sprintf("%s:\n", key)) - - s := bufio.NewScanner(strings.NewReader(mStr)) - for s.Scan() { - buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) - } - } - - return strings.TrimSpace(buf.String()) -} - -func (d *Diff) init() { - if d.Modules == nil { - rootDiff := &ModuleDiff{Path: rootModulePath} - d.Modules = []*ModuleDiff{rootDiff} - } - for _, m := range d.Modules { - m.init() - } -} - -// ModuleDiff tracks the differences between resources to apply within -// a single module. -type ModuleDiff struct { - Path []string - Resources map[string]*InstanceDiff - Destroy bool // Set only by the destroy plan -} - -func (d *ModuleDiff) init() { - if d.Resources == nil { - d.Resources = make(map[string]*InstanceDiff) - } - for _, r := range d.Resources { - r.init() - } -} - -// ChangeType returns the type of changes that the diff for this -// module includes. -// -// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or -// DiffCreate. If an instance within the module has a DiffDestroyCreate -// then this will register as a DiffCreate for a module. -func (d *ModuleDiff) ChangeType() DiffChangeType { - result := DiffNone - for _, r := range d.Resources { - change := r.ChangeType() - switch change { - case DiffCreate, DiffDestroy: - if result == DiffNone { - result = change - } - case DiffDestroyCreate, DiffUpdate: - result = DiffUpdate - } - } - - return result -} - -// Empty returns true if the diff has no changes within this module. -func (d *ModuleDiff) Empty() bool { - if d.Destroy { - return false - } - - if len(d.Resources) == 0 { - return true - } - - for _, rd := range d.Resources { - if !rd.Empty() { - return false - } - } - - return true -} - -// Instances returns the instance diffs for the id given. This can return -// multiple instance diffs if there are counts within the resource. -func (d *ModuleDiff) Instances(id string) []*InstanceDiff { - var result []*InstanceDiff - for k, diff := range d.Resources { - if k == id || strings.HasPrefix(k, id+".") { - if !diff.Empty() { - result = append(result, diff) - } - } - } - - return result -} - -// IsRoot says whether or not this module diff is for the root module. -func (d *ModuleDiff) IsRoot() bool { - return reflect.DeepEqual(d.Path, rootModulePath) -} - -// String outputs the diff in a long but command-line friendly output -// format that users can read to quickly inspect a diff. 
-func (d *ModuleDiff) String() string { - var buf bytes.Buffer - - names := make([]string, 0, len(d.Resources)) - for name, _ := range d.Resources { - names = append(names, name) - } - sort.Strings(names) - - for _, name := range names { - rdiff := d.Resources[name] - - crud := "UPDATE" - switch { - case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()): - crud = "DESTROY/CREATE" - case rdiff.GetDestroy() || rdiff.GetDestroyDeposed(): - crud = "DESTROY" - case rdiff.RequiresNew(): - crud = "CREATE" - } - - extra := "" - if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() { - extra = " (deposed only)" - } - - buf.WriteString(fmt.Sprintf( - "%s: %s%s\n", - crud, - name, - extra)) - - keyLen := 0 - rdiffAttrs := rdiff.CopyAttributes() - keys := make([]string, 0, len(rdiffAttrs)) - for key, _ := range rdiffAttrs { - if key == "id" { - continue - } - - keys = append(keys, key) - if len(key) > keyLen { - keyLen = len(key) - } - } - sort.Strings(keys) - - for _, attrK := range keys { - attrDiff, _ := rdiff.GetAttribute(attrK) - - v := attrDiff.New - u := attrDiff.Old - if attrDiff.NewComputed { - v = "" - } - - if attrDiff.Sensitive { - u = "" - v = "" - } - - updateMsg := "" - if attrDiff.RequiresNew { - updateMsg = " (forces new resource)" - } else if attrDiff.Sensitive { - updateMsg = " (attribute changed)" - } - - buf.WriteString(fmt.Sprintf( - " %s:%s %#v => %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - u, - v, - updateMsg)) - } - } - - return buf.String() -} - -// InstanceDiff is the diff of a resource from some state to another. -type InstanceDiff struct { - mu sync.Mutex - Attributes map[string]*ResourceAttrDiff - Destroy bool - DestroyDeposed bool - DestroyTainted bool - - // Meta is a simple K/V map that is stored in a diff and persisted to - // plans but otherwise is completely ignored by Terraform core. It is - // meant to be used for additional data a resource may want to pass through. - // The value here must only contain Go primitives and collections. - Meta map[string]interface{} -} - -func (d *InstanceDiff) Lock() { d.mu.Lock() } -func (d *InstanceDiff) Unlock() { d.mu.Unlock() } - -// ApplyToValue merges the receiver into the given base value, returning a -// new value that incorporates the planned changes. The given value must -// conform to the given schema, or this method will panic. -// -// This method is intended for shimming old subsystems that still use this -// legacy diff type to work with the new-style types. -func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) { - // Create an InstanceState attributes from our existing state. - // We can use this to more easily apply the diff changes. - attrs := hcl2shim.FlatmapValueFromHCL2(base) - applied, err := d.Apply(attrs, schema) - if err != nil { - return base, err - } - - val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType()) - if err != nil { - return base, err - } - - return schema.CoerceValue(val) -} - -// Apply applies the diff to the provided flatmapped attributes, -// returning the new instance attributes. -// -// This method is intended for shimming old subsystems that still use this -// legacy diff type to work with the new-style types. 
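
// (Sketch of the flatmap shim round trip that ApplyToValue above builds on,
// with an illustrative object value; hcl2shim encodes list length under a
// ".#" count key, matching the multiVal pattern above.)
obj := cty.ObjectVal(map[string]cty.Value{
	"tags": cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
})
flat := hcl2shim.FlatmapValueFromHCL2(obj)
// flat == map[string]string{"tags.#": "2", "tags.0": "a", "tags.1": "b"}
back, err := hcl2shim.HCL2ValueFromFlatmap(flat, obj.Type())
_, _ = back, err // back equals obj when err is nil
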
-func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) {
-	// We always build a new value here, even if the given diff is "empty",
-	// because we might be planning to create a new instance that happens
-	// to have no attributes set, and so we want to produce an empty object
-	// rather than just echoing back the null old value.
-	if attrs == nil {
-		attrs = map[string]string{}
-	}
-
-	// Rather than applying the diff to mutate the attrs, we'll copy new values
-	// into here to avoid the possibility of leaving stale values.
-	result := map[string]string{}
-
-	if d.Destroy || d.DestroyDeposed || d.DestroyTainted {
-		return result, nil
-	}
-
-	return d.applyBlockDiff(nil, attrs, schema)
-}
-
-func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) {
-	result := map[string]string{}
-	name := ""
-	if len(path) > 0 {
-		name = path[len(path)-1]
-	}
-
-	// localPrefix is used to build the local result map
-	localPrefix := ""
-	if name != "" {
-		localPrefix = name + "."
-	}
-
-	// iterate over the schema rather than the attributes, so we can handle
-	// different block types separately from plain attributes
-	for n, attrSchema := range schema.Attributes {
-		var err error
-		newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema)
-
-		if err != nil {
-			return result, err
-		}
-
-		for k, v := range newAttrs {
-			result[localPrefix+k] = v
-		}
-	}
-
-	blockPrefix := strings.Join(path, ".")
-	if blockPrefix != "" {
-		blockPrefix += "."
-	}
-	for n, block := range schema.BlockTypes {
-		// we need to find the set of all keys that traverse this block
-		candidateKeys := map[string]bool{}
-		blockKey := blockPrefix + n + "."
-		localBlockPrefix := localPrefix + n + "."
-
-		// we can only trust the diff for sets, since the path changes, so don't
-		// count existing values as candidate keys. If it turns out we're
-		// keeping the attributes, we will catch it down below with "keepBlock"
-		// after we check the set count.
-		if block.Nesting != configschema.NestingSet {
-			for k := range attrs {
-				if strings.HasPrefix(k, blockKey) {
-					nextDot := strings.Index(k[len(blockKey):], ".")
-					if nextDot < 0 {
-						continue
-					}
-					nextDot += len(blockKey)
-					candidateKeys[k[len(blockKey):nextDot]] = true
-				}
-			}
-		}
-
-		for k, diff := range d.Attributes {
-			// helper/schema should not insert nil diff values, but don't panic
-			// if it does.
-			if diff == nil {
-				continue
-			}
-
-			if strings.HasPrefix(k, blockKey) {
-				nextDot := strings.Index(k[len(blockKey):], ".")
-				if nextDot < 0 {
-					continue
-				}
-
-				if diff.NewRemoved {
-					continue
-				}
-
-				nextDot += len(blockKey)
-				candidateKeys[k[len(blockKey):nextDot]] = true
-			}
-		}
-
-		// check each set candidate to see if it was removed.
-		// we need to do this, because when entire sets are removed, they may
-		// have the wrong key, and only show diffs going to ""
-		if block.Nesting == configschema.NestingSet {
-			for k := range candidateKeys {
-				indexPrefix := strings.Join(append(path, n, k), ".") + "."
-				keep := false
-				// now check each set element to see if it's a new diff, or one
-				// that we're dropping. Since we're only applying the "New"
-				// portion of the set, we can ignore diffs that only contain "Old"
-				for attr, diff := range d.Attributes {
-					// helper/schema should not insert nil diff values, but don't panic
-					// if it does.
-					if diff == nil {
-						continue
-					}
-
-					if !strings.HasPrefix(attr, indexPrefix) {
-						continue
-					}
-
-					// check for empty "count" keys
-					if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" {
-						continue
-					}
-
-					// removed items don't count either
-					if diff.NewRemoved {
-						continue
-					}
-
-					// this must be a diff to keep
-					keep = true
-					break
-				}
-				if !keep {
-					delete(candidateKeys, k)
-				}
-			}
-		}
-
-		for k := range candidateKeys {
-			newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block)
-			if err != nil {
-				return result, err
-			}
-
-			for attr, v := range newAttrs {
-				result[localBlockPrefix+attr] = v
-			}
-		}
-
-		keepBlock := true
-		// check this block's count diff directly first, since we may not
-		// have candidates because it was removed and only set to "0"
-		if diff, ok := d.Attributes[blockKey+"#"]; ok {
-			if diff.New == "0" || diff.NewRemoved {
-				keepBlock = false
-			}
-		}
-
-		// if there was no diff at all, then we need to keep the block attributes
-		if len(candidateKeys) == 0 && keepBlock {
-			for k, v := range attrs {
-				if strings.HasPrefix(k, blockKey) {
-					// we need the key relative to this block, so remove the
-					// entire prefix, then re-insert the block name.
-					localKey := localBlockPrefix + k[len(blockKey):]
-					result[localKey] = v
-				}
-			}
-		}
-
-		countAddr := strings.Join(append(path, n, "#"), ".")
-		if countDiff, ok := d.Attributes[countAddr]; ok {
-			if countDiff.NewComputed {
-				result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue
-			} else {
-				result[localBlockPrefix+"#"] = countDiff.New
-
-				// While sets are complete, lists are not, and we may not have all the
-				// information to track removals. If the list was truncated, we need to
-				// remove the extra items from the result.
-				if block.Nesting == configschema.NestingList &&
-					countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue {
-					length, _ := strconv.Atoi(countDiff.New)
-					for k := range result {
-						if !strings.HasPrefix(k, localBlockPrefix) {
-							continue
-						}
-
-						index := k[len(localBlockPrefix):]
-						nextDot := strings.Index(index, ".")
-						if nextDot < 1 {
-							continue
-						}
-						index = index[:nextDot]
-						i, err := strconv.Atoi(index)
-						if err != nil {
-							// this shouldn't happen since we added these
-							// ourselves, but make note of it just in case.
- log.Printf("[ERROR] bad list index in %q: %s", k, err) - continue - } - if i >= length { - delete(result, k) - } - } - } - } - } else if origCount, ok := attrs[countAddr]; ok && keepBlock { - result[localBlockPrefix+"#"] = origCount - } else { - result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result) - } - } - - return result, nil -} - -func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { - ty := attrSchema.Type - switch { - case ty.IsListType(), ty.IsTupleType(), ty.IsMapType(): - return d.applyCollectionDiff(path, attrs, attrSchema) - case ty.IsSetType(): - return d.applySetDiff(path, attrs, attrSchema) - default: - return d.applySingleAttrDiff(path, attrs, attrSchema) - } -} - -func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { - currentKey := strings.Join(path, ".") - - attr := path[len(path)-1] - - result := map[string]string{} - diff := d.Attributes[currentKey] - old, exists := attrs[currentKey] - - if diff != nil && diff.NewComputed { - result[attr] = hcl2shim.UnknownVariableValue - return result, nil - } - - // "id" must exist and not be an empty string, or it must be unknown. - // This only applied to top-level "id" fields. - if attr == "id" && len(path) == 1 { - if old == "" { - result[attr] = hcl2shim.UnknownVariableValue - } else { - result[attr] = old - } - return result, nil - } - - // attribute diffs are sometimes missed, so assume no diff means keep the - // old value - if diff == nil { - if exists { - result[attr] = old - } else { - // We need required values, so set those with an empty value. It - // must be set in the config, since if it were missing it would have - // failed validation. - if attrSchema.Required { - // we only set a missing string here, since bool or number types - // would have distinct zero value which shouldn't have been - // lost. - if attrSchema.Type == cty.String { - result[attr] = "" - } - } - } - return result, nil - } - - // check for missmatched diff values - if exists && - old != diff.Old && - old != hcl2shim.UnknownVariableValue && - diff.Old != hcl2shim.UnknownVariableValue { - return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) - } - - if diff.NewRemoved { - // don't set anything in the new value - return map[string]string{}, nil - } - - if diff.Old == diff.New && diff.New == "" { - // this can only be a valid empty string - if attrSchema.Type == cty.String { - result[attr] = "" - } - return result, nil - } - - if attrSchema.Computed && diff.NewComputed { - result[attr] = hcl2shim.UnknownVariableValue - return result, nil - } - - result[attr] = diff.New - - return result, nil -} - -func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { - result := map[string]string{} - - prefix := "" - if len(path) > 1 { - prefix = strings.Join(path[:len(path)-1], ".") + "." 
-	}
-
-	name := ""
-	if len(path) > 0 {
-		name = path[len(path)-1]
-	}
-
-	currentKey := prefix + name
-
-	// check the index first for special handling
-	for k, diff := range d.Attributes {
-		// check the index value, which can be set, and 0
-		if k == currentKey+".#" || k == currentKey+".%" || k == currentKey {
-			if diff.NewRemoved {
-				return result, nil
-			}
-
-			if diff.NewComputed {
-				result[k[len(prefix):]] = hcl2shim.UnknownVariableValue
-				return result, nil
-			}
-
-			// do what the diff tells us to here, so that it's consistent with applies
-			if diff.New == "0" {
-				result[k[len(prefix):]] = "0"
-				return result, nil
-			}
-		}
-	}
-
-	// collect all the keys from the diff and the old state
-	noDiff := true
-	keys := map[string]bool{}
-	for k := range d.Attributes {
-		if !strings.HasPrefix(k, currentKey+".") {
-			continue
-		}
-		noDiff = false
-		keys[k] = true
-	}
-
-	noAttrs := true
-	for k := range attrs {
-		if !strings.HasPrefix(k, currentKey+".") {
-			continue
-		}
-		noAttrs = false
-		keys[k] = true
-	}
-
-	// If there's no diff and no attrs, then there's no value at all.
-	// This prevents an unexpected zero-count attribute in the attributes.
-	if noDiff && noAttrs {
-		return result, nil
-	}
-
-	idx := "#"
-	if attrSchema.Type.IsMapType() {
-		idx = "%"
-	}
-
-	for k := range keys {
-		// generate a schema placeholder for the values
-		elSchema := &configschema.Attribute{
-			Type: attrSchema.Type.ElementType(),
-		}
-
-		res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema)
-		if err != nil {
-			return result, err
-		}
-
-		for k, v := range res {
-			result[name+"."+k] = v
-		}
-	}
-
-	// Just like in nested list blocks, for simple lists we may need to fill in
-	// missing empty strings.
-	countKey := name + "." + idx
-	count := result[countKey]
-	length, _ := strconv.Atoi(count)
-
-	if count != "" && count != hcl2shim.UnknownVariableValue &&
-		attrSchema.Type.Equals(cty.List(cty.String)) {
-		// insert empty strings into missing indexes
-		for i := 0; i < length; i++ {
-			key := fmt.Sprintf("%s.%d", name, i)
-			if _, ok := result[key]; !ok {
-				result[key] = ""
-			}
-		}
-	}
-
-	// now check for truncation in any type of list
-	if attrSchema.Type.IsListType() {
-		for key := range result {
-			if key == countKey {
-				continue
-			}
-
-			if len(key) <= len(name)+1 {
-				// not sure what this is, but don't panic
-				continue
-			}
-
-			index := key[len(name)+1:]
-
-			// It is possible to have nested sets or maps, so look for another dot
-			dot := strings.Index(index, ".")
-			if dot > 0 {
-				index = index[:dot]
-			}
-
-			// This shouldn't have any more dots, since the element type is only string.
-			num, err := strconv.Atoi(index)
-			if err != nil {
-				log.Printf("[ERROR] bad list index in %q: %s", currentKey, err)
-				continue
-			}
-
-			if num >= length {
-				delete(result, key)
-			}
-		}
-	}
-
-	// Fill in the count value if it wasn't present in the diff for some reason,
-	// or if there is no count at all.
-	_, countDiff := d.Attributes[countKey]
-	if result[countKey] == "" || (!countDiff && len(keys) != len(result)) {
-		result[countKey] = countFlatmapContainerValues(countKey, result)
-	}
-
-	return result, nil
-}
-
-func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
-	// We only need this special behavior for sets of objects.
-	if !attrSchema.Type.ElementType().IsObjectType() {
-		// The normal collection apply behavior will work okay for this one, then.
- return d.applyCollectionDiff(path, attrs, attrSchema) - } - - // When we're dealing with a set of an object type we actually want to - // use our normal _block type_ apply behaviors, so we'll construct ourselves - // a synthetic schema that treats the object type as a block type and - // then delegate to our block apply method. - synthSchema := &configschema.Block{ - Attributes: make(map[string]*configschema.Attribute), - } - - for name, ty := range attrSchema.Type.ElementType().AttributeTypes() { - // We can safely make everything into an attribute here because in the - // event that there are nested set attributes we'll end up back in - // here again recursively and can then deal with the next level of - // expansion. - synthSchema.Attributes[name] = &configschema.Attribute{ - Type: ty, - Optional: true, - } - } - - parentPath := path[:len(path)-1] - childName := path[len(path)-1] - containerSchema := &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - childName: { - Nesting: configschema.NestingSet, - Block: *synthSchema, - }, - }, - } - - return d.applyBlockDiff(parentPath, attrs, containerSchema) -} - -// countFlatmapContainerValues returns the number of values in the flatmapped container -// (set, map, list) indexed by key. The key argument is expected to include the -// trailing ".#", or ".%". -func countFlatmapContainerValues(key string, attrs map[string]string) string { - if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { - panic(fmt.Sprintf("invalid index value %q", key)) - } - - prefix := key[:len(key)-1] - items := map[string]int{} - - for k := range attrs { - if k == key { - continue - } - if !strings.HasPrefix(k, prefix) { - continue - } - - suffix := k[len(prefix):] - dot := strings.Index(suffix, ".") - if dot > 0 { - suffix = suffix[:dot] - } - - items[suffix]++ - } - return strconv.Itoa(len(items)) -} - -// ResourceAttrDiff is the diff of a single attribute of a resource. -type ResourceAttrDiff struct { - Old string // Old Value - New string // New Value - NewComputed bool // True if new value is computed (unknown currently) - NewRemoved bool // True if this attribute is being removed - NewExtra interface{} // Extra information for the provider - RequiresNew bool // True if change requires new resource - Sensitive bool // True if the data should not be displayed in UI output - Type DiffAttrType -} - -// Empty returns true if the diff for this attr is neutral -func (d *ResourceAttrDiff) Empty() bool { - return d.Old == d.New && !d.NewComputed && !d.NewRemoved -} - -func (d *ResourceAttrDiff) GoString() string { - return fmt.Sprintf("*%#v", *d) -} - -// DiffAttrType is an enum type that says whether a resource attribute -// diff is an input attribute (comes from the configuration) or an -// output attribute (comes as a result of applying the configuration). An -// example input would be "ami" for AWS and an example output would be -// "private_ip". 
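
// (A minimal sketch exercising the legacy apply shim defined above, placed
// here before the DiffAttrType declaration; the schema and values are
// illustrative, and NewInstanceDiff and SetAttribute are defined further
// below. The configschema and cty imports are assumed.)
func exampleApplyDiff() (map[string]string, error) {
	d := NewInstanceDiff()
	d.SetAttribute("ami", &ResourceAttrDiff{Old: "ami-1", New: "ami-2"})
	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"ami": {Type: cty.String, Required: true},
		},
	}
	// Yields map[string]string{"ami": "ami-2"}.
	return d.Apply(map[string]string{"ami": "ami-1"}, schema)
}
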
-type DiffAttrType byte - -const ( - DiffAttrUnknown DiffAttrType = iota - DiffAttrInput - DiffAttrOutput -) - -func (d *InstanceDiff) init() { - if d.Attributes == nil { - d.Attributes = make(map[string]*ResourceAttrDiff) - } -} - -func NewInstanceDiff() *InstanceDiff { - return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} -} - -func (d *InstanceDiff) Copy() (*InstanceDiff, error) { - if d == nil { - return nil, nil - } - - dCopy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - return nil, err - } - - return dCopy.(*InstanceDiff), nil -} - -// ChangeType returns the DiffChangeType represented by the diff -// for this single instance. -func (d *InstanceDiff) ChangeType() DiffChangeType { - if d.Empty() { - return DiffNone - } - - if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { - return DiffDestroyCreate - } - - if d.GetDestroy() || d.GetDestroyDeposed() { - return DiffDestroy - } - - if d.RequiresNew() { - return DiffCreate - } - - return DiffUpdate -} - -// Empty returns true if this diff encapsulates no changes. -func (d *InstanceDiff) Empty() bool { - if d == nil { - return true - } - - d.mu.Lock() - defer d.mu.Unlock() - return !d.Destroy && - !d.DestroyTainted && - !d.DestroyDeposed && - len(d.Attributes) == 0 -} - -// Equal compares two diffs for exact equality. -// -// This is different from the Same comparison that is supported which -// checks for operation equality taking into account computed values. Equal -// instead checks for exact equality. -func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { - // If one is nil, they must both be nil - if d == nil || d2 == nil { - return d == d2 - } - - // Use DeepEqual - return reflect.DeepEqual(d, d2) -} - -// DeepCopy performs a deep copy of all parts of the InstanceDiff -func (d *InstanceDiff) DeepCopy() *InstanceDiff { - copy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - panic(err) - } - - return copy.(*InstanceDiff) -} - -func (d *InstanceDiff) GoString() string { - return fmt.Sprintf("*%#v", InstanceDiff{ - Attributes: d.Attributes, - Destroy: d.Destroy, - DestroyTainted: d.DestroyTainted, - DestroyDeposed: d.DestroyDeposed, - }) -} - -// RequiresNew returns true if the diff requires the creation of a new -// resource (implying the destruction of the old). -func (d *InstanceDiff) RequiresNew() bool { - if d == nil { - return false - } - - d.mu.Lock() - defer d.mu.Unlock() - - return d.requiresNew() -} - -func (d *InstanceDiff) requiresNew() bool { - if d == nil { - return false - } - - if d.DestroyTainted { - return true - } - - for _, rd := range d.Attributes { - if rd != nil && rd.RequiresNew { - return true - } - } - - return false -} - -func (d *InstanceDiff) GetDestroyDeposed() bool { - d.mu.Lock() - defer d.mu.Unlock() - - return d.DestroyDeposed -} - -func (d *InstanceDiff) SetDestroyDeposed(b bool) { - d.mu.Lock() - defer d.mu.Unlock() - - d.DestroyDeposed = b -} - -// These methods are properly locked, for use outside other InstanceDiff -// methods but everywhere else within the terraform package. 
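The Copy and DeepCopy helpers above lean on github.com/mitchellh/copystructure with Lock: true, which makes the copier respect any mutex fields it encounters while walking the value. A minimal standalone sketch of that pattern; the `thing` type here is invented for illustration:

package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

type thing struct {
	Attrs map[string]string
}

func main() {
	orig := &thing{Attrs: map[string]string{"a": "1"}}

	// Deep copy: the returned value shares no mutable state with orig.
	raw, err := copystructure.Config{Lock: true}.Copy(orig)
	if err != nil {
		panic(err)
	}
	dup := raw.(*thing)

	dup.Attrs["a"] = "changed"
	fmt.Println(orig.Attrs["a"], dup.Attrs["a"]) // 1 changed
}
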
-// TODO refactor the locking scheme
-func (d *InstanceDiff) SetTainted(b bool) {
- d.mu.Lock()
- defer d.mu.Unlock()
-
- d.DestroyTainted = b
-}
-
-func (d *InstanceDiff) GetDestroyTainted() bool {
- d.mu.Lock()
- defer d.mu.Unlock()
-
- return d.DestroyTainted
-}
-
-func (d *InstanceDiff) SetDestroy(b bool) {
- d.mu.Lock()
- defer d.mu.Unlock()
-
- d.Destroy = b
-}
-
-func (d *InstanceDiff) GetDestroy() bool {
- d.mu.Lock()
- defer d.mu.Unlock()
-
- return d.Destroy
-}
-
-func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) {
- d.mu.Lock()
- defer d.mu.Unlock()
-
- d.Attributes[key] = attr
-}
-
-func (d *InstanceDiff) DelAttribute(key string) {
- d.mu.Lock()
- defer d.mu.Unlock()
-
- delete(d.Attributes, key)
-}
-
-func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) {
- d.mu.Lock()
- defer d.mu.Unlock()
-
- attr, ok := d.Attributes[key]
- return attr, ok
-}
-
-func (d *InstanceDiff) GetAttributesLen() int {
- d.mu.Lock()
- defer d.mu.Unlock()
-
- return len(d.Attributes)
-}
-
-// CopyAttributes safely copies the Attributes map.
-func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff {
- d.mu.Lock()
- defer d.mu.Unlock()
-
- attrs := make(map[string]*ResourceAttrDiff)
- for k, v := range d.Attributes {
- attrs[k] = v
- }
-
- return attrs
-}
-
-// Same checks whether or not two InstanceDiffs are the "same". When
-// we say "same", it is not necessarily exactly equal. Instead, it is
-// just checking that the same attributes are changing, a destroy
-// isn't suddenly happening, etc.
-func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
- // we can safely compare the pointers without a lock
- switch {
- case d == nil && d2 == nil:
- return true, ""
- case d == nil || d2 == nil:
- return false, "one nil"
- case d == d2:
- return true, ""
- }
-
- d.mu.Lock()
- defer d.mu.Unlock()
-
- // If we're going from requiring new to NOT requiring new, then we have
- // to see if all required news were computed. If so, it is allowed since
- // computed may also mean "same value and therefore not new".
- oldNew := d.requiresNew()
- newNew := d2.RequiresNew()
- if oldNew && !newNew {
- oldNew = false
-
- // This section builds a list of ignorable attributes for requiresNew
- // by removing any elements of collections going to zero elements.
- // For collections going to zero, they may not exist at all in the
- // new diff (and hence RequiresNew == false).
- ignoreAttrs := make(map[string]struct{})
- for k, diffOld := range d.Attributes {
- if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") {
- continue
- }
-
- // This case is in here as a protection measure. The bug that this
- // code originally fixed (GH-11349) didn't have to deal with computed
- // so I'm not 100% sure what the correct behavior is. Best to leave
- // the old behavior.
- if diffOld.NewComputed {
- continue
- }
-
- // We're looking for the case where a map goes to exactly 0.
- if diffOld.New != "0" {
- continue
- }
-
- // Found it! Ignore all of these. The prefix here is stripping
- // off the "%" so it is just "k."
- prefix := k[:len(k)-1]
- for k2 := range d.Attributes {
- if strings.HasPrefix(k2, prefix) {
- ignoreAttrs[k2] = struct{}{}
- }
- }
- }
-
- for k, rd := range d.Attributes {
- if _, ok := ignoreAttrs[k]; ok {
- continue
- }
-
- // If the field requires new and is NOT computed, then what
- // we have is a diff mismatch for sure. We record that the old
- // diff does REQUIRE a ForceNew.
- if rd != nil && rd.RequiresNew && !rd.NewComputed {
- oldNew = true
- break
- }
- }
- }
-
- if oldNew != newNew {
- return false, fmt.Sprintf(
- "diff RequiresNew; old: %t, new: %t", oldNew, newNew)
- }
-
- // Verify that destroy matches. The second boolean here allows us to
- // have mismatching Destroy if we're moving from RequiresNew true
- // to false above. Therefore, the second boolean will only pass if
- // we're moving from Destroy: true to false as well.
- if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew {
- return false, fmt.Sprintf(
- "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy())
- }
-
- // Go through the old diff and make sure the new diff has all the
- // same attributes. To start, build up the check map to be all the keys.
- checkOld := make(map[string]struct{})
- checkNew := make(map[string]struct{})
- for k := range d.Attributes {
- checkOld[k] = struct{}{}
- }
- for k := range d2.CopyAttributes() {
- checkNew[k] = struct{}{}
- }
-
- // Make an ordered list so we are sure the approximated hashes are left
- // to process at the end of the loop
- keys := make([]string, 0, len(d.Attributes))
- for k := range d.Attributes {
- keys = append(keys, k)
- }
- sort.StringSlice(keys).Sort()
-
- for _, k := range keys {
- diffOld := d.Attributes[k]
-
- if _, ok := checkOld[k]; !ok {
- // We're not checking this key for whatever reason (see where
- // check is modified).
- continue
- }
-
- // Remove this key since we'll never hit it again
- delete(checkOld, k)
- delete(checkNew, k)
-
- _, ok := d2.GetAttribute(k)
- if !ok {
- // If there's no new attribute, and the old diff expected the attribute
- // to be removed, that's just fine.
- if diffOld.NewRemoved {
- continue
- }
-
- // If the last diff was a computed value then the absence of
- // that value is allowed since it may mean the value ended up
- // being the same.
- if diffOld.NewComputed {
- ok = true
- }
-
- // No exact match, but maybe this is a set containing computed
- // values. So check if there is an approximate hash in the key
- // and if so, try to match the key.
- if strings.Contains(k, "~") {
- parts := strings.Split(k, ".")
- parts2 := append([]string(nil), parts...)
-
- re := regexp.MustCompile(`^~\d+$`)
- for i, part := range parts {
- if re.MatchString(part) {
- // we're going to consider this the base of a
- // computed hash, and remove all longer matching fields
- ok = true
-
- parts2[i] = `\d+`
- parts2 = parts2[:i+1]
- break
- }
- }
-
- re, err := regexp.Compile("^" + strings.Join(parts2, `\.`))
- if err != nil {
- return false, fmt.Sprintf("regexp failed to compile; err: %#v", err)
- }
-
- for k2 := range checkNew {
- if re.MatchString(k2) {
- delete(checkNew, k2)
- }
- }
- }
-
- // This is a little tricky, but when a diff contains a computed
- // list, set, or map that can only be interpolated after the apply
- // command has created the dependent resources, it could turn out
- // that the result is actually the same as the existing state which
- // would remove the key from the diff.
- if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
- ok = true
- }
-
- // Similarly, in a RequiresNew scenario, a list that shows up in the plan
- // diff can disappear from the apply diff, which is calculated from an
- // empty state.
- if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
- ok = true
- }
-
- if !ok {
- return false, fmt.Sprintf("attribute mismatch: %s", k)
- }
- }
-
- // search for the suffix of the base of a [computed] map, list or set.
- match := multiVal.FindStringSubmatch(k)
-
- if diffOld.NewComputed && len(match) == 2 {
- matchLen := len(match[1])
-
- // This is a computed list, set, or map, so remove any keys with
- // this prefix from the check list.
- kprefix := k[:len(k)-matchLen]
- for k2 := range checkOld {
- if strings.HasPrefix(k2, kprefix) {
- delete(checkOld, k2)
- }
- }
- for k2 := range checkNew {
- if strings.HasPrefix(k2, kprefix) {
- delete(checkNew, k2)
- }
- }
- }
-
- // We don't compare the values because we can't currently actually
- // guarantee to generate the same value for two diffs created from
- // the same state+config: we have some pesky interpolation functions
- // that do not behave as pure functions (uuid, timestamp) and so they
- // can be different each time a diff is produced.
- // FIXME: Re-organize our config handling so that we don't re-evaluate
- // expressions when we produce a second comparison diff during
- // apply (for EvalCompareDiff).
- }
-
- // Check for leftover attributes
- if len(checkNew) > 0 {
- extras := make([]string, 0, len(checkNew))
- for attr := range checkNew {
- extras = append(extras, attr)
- }
- return false,
- fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", "))
- }
-
- return true, ""
-}
-
-// moduleDiffSort implements sort.Interface to sort module diffs by path.
-type moduleDiffSort []*ModuleDiff

-func (s moduleDiffSort) Len() int { return len(s) }
-func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s moduleDiffSort) Less(i, j int) bool {
- a := s[i]
- b := s[j]
-
- // If the lengths are different, then the shorter one always wins
- if len(a.Path) != len(b.Path) {
- return len(a.Path) < len(b.Path)
- }
-
- // Otherwise, compare lexically
- return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
deleted file mode 100644
index 2a8909ae..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/eval.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package terraform
-
-import (
- "log"
-
- "github.com/hashicorp/terraform/tfdiags"
-)
-
-// EvalNode is the interface that must be implemented by graph nodes to
-// evaluate/execute.
-type EvalNode interface {
- // Eval evaluates this node with the given context.
- Eval(EvalContext) (interface{}, error)
-}
-
-// GraphNodeEvalable is the interface that graph nodes must implement
-// to enable evaluation.
-type GraphNodeEvalable interface {
- EvalTree() EvalNode
-}
-
-// EvalEarlyExitError is a special error return value that can be returned
-// by eval nodes to signal an early exit.
-type EvalEarlyExitError struct{}
-
-func (EvalEarlyExitError) Error() string { return "early exit" }
-
-// Eval evaluates the given EvalNode with the given context, properly
-// evaluating all args in the correct order.
-func Eval(n EvalNode, ctx EvalContext) (interface{}, error) {
- // Call the lower level eval which doesn't understand early exit,
- // and if we early exit, it isn't an error.
- result, err := EvalRaw(n, ctx)
- if err != nil {
- if _, ok := err.(EvalEarlyExitError); ok {
- return nil, nil
- }
- }
-
- return result, err
-}
-
-// EvalRaw is like Eval except that it returns all errors, even if they
-// signal something normal such as EvalEarlyExitError.
-func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
- log.Printf("[TRACE] eval: %T", n)
- output, err := n.Eval(ctx)
- if err != nil {
- switch err.(type) {
- case EvalEarlyExitError:
- log.Printf("[TRACE] eval: %T, early exit err: %s", n, err)
- case tfdiags.NonFatalError:
- log.Printf("[WARN] eval: %T, non-fatal err: %s", n, err)
- default:
- log.Printf("[ERROR] eval: %T, err: %s", n, err)
- }
- }
-
- return output, err
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
deleted file mode 100644
index 6c9ed41b..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
+++ /dev/null
@@ -1,705 +0,0 @@
-package terraform
-
-import (
- "fmt"
- "log"
- "strings"
-
- multierror "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/hcl/v2"
- "github.com/zclconf/go-cty/cty"
-
- "github.com/hashicorp/terraform/addrs"
- "github.com/hashicorp/terraform/configs"
- "github.com/hashicorp/terraform/plans"
- "github.com/hashicorp/terraform/plans/objchange"
- "github.com/hashicorp/terraform/providers"
- "github.com/hashicorp/terraform/provisioners"
- "github.com/hashicorp/terraform/states"
- "github.com/hashicorp/terraform/tfdiags"
-)
-
-// EvalApply is an EvalNode implementation that applies the planned change
-// for a resource instance and captures the resulting state object.
-type EvalApply struct {
- Addr addrs.ResourceInstance
- Config *configs.Resource
- State **states.ResourceInstanceObject
- Change **plans.ResourceInstanceChange
- ProviderAddr addrs.AbsProviderConfig
- Provider *providers.Interface
- ProviderMetas map[addrs.Provider]*configs.ProviderMeta
- ProviderSchema **ProviderSchema
- Output **states.ResourceInstanceObject
- CreateNew *bool
- Error *error
- CreateBeforeDestroy bool
-}
-
-// TODO: test
-func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
- var diags tfdiags.Diagnostics
-
- change := *n.Change
- provider := *n.Provider
- state := *n.State
- absAddr := n.Addr.Absolute(ctx.Path())
-
- if state == nil {
- state = &states.ResourceInstanceObject{}
- }
-
- schema, _ := (*n.ProviderSchema).SchemaForResourceType(n.Addr.Resource.Mode, n.Addr.Resource.Type)
- if schema == nil {
- // Should be caught during validation, so we don't bother with a pretty error here
- return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
- }
-
- if n.CreateNew != nil {
- *n.CreateNew = (change.Action == plans.Create || change.Action.IsReplace())
- }
-
- configVal := cty.NullVal(cty.DynamicPseudoType)
- if n.Config != nil {
- var configDiags tfdiags.Diagnostics
- forEach, _ := evaluateForEachExpression(n.Config.ForEach, ctx)
- keyData := EvalDataForInstanceKey(n.Addr.Key, forEach)
- configVal, _, configDiags = ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData)
- diags = diags.Append(configDiags)
- if configDiags.HasErrors() {
- return nil, diags.Err()
- }
- }
-
- if !configVal.IsWhollyKnown() {
- return nil, fmt.Errorf(
- "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)",
- absAddr,
- )
- }
-
- metaConfigVal := cty.NullVal(cty.DynamicPseudoType)
- if n.ProviderMetas != nil {
- log.Printf("[DEBUG] EvalApply: ProviderMeta config value set")
- if m,
ok := n.ProviderMetas[n.ProviderAddr.Provider]; ok && m != nil { - // if the provider doesn't support this feature, throw an error - if (*n.ProviderSchema).ProviderMeta == nil { - log.Printf("[DEBUG] EvalApply: no ProviderMeta schema") - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", n.ProviderAddr.Provider.String()), - Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr), - Subject: &m.ProviderRange, - }) - } else { - log.Printf("[DEBUG] EvalApply: ProviderMeta schema found") - var configDiags tfdiags.Diagnostics - metaConfigVal, _, configDiags = ctx.EvaluateBlock(m.Config, (*n.ProviderSchema).ProviderMeta, nil, EvalDataForNoInstanceKey) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - } - } - } - - log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr.Absolute(ctx.Path()), change.Action) - resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ - TypeName: n.Addr.Resource.Type, - PriorState: change.Before, - Config: configVal, - PlannedState: change.After, - PlannedPrivate: change.Private, - ProviderMeta: metaConfigVal, - }) - applyDiags := resp.Diagnostics - if n.Config != nil { - applyDiags = applyDiags.InConfigBody(n.Config.Config) - } - diags = diags.Append(applyDiags) - - // Even if there are errors in the returned diagnostics, the provider may - // have returned a _partial_ state for an object that already exists but - // failed to fully configure, and so the remaining code must always run - // to completion but must be defensive against the new value being - // incomplete. - newVal := resp.NewState - - if newVal == cty.NilVal { - // Providers are supposed to return a partial new value even when errors - // occur, but sometimes they don't and so in that case we'll patch that up - // by just using the prior state, so we'll at least keep track of the - // object for the user to retry. - newVal = change.Before - - // As a special case, we'll set the new value to null if it looks like - // we were trying to execute a delete, because the provider in this case - // probably left the newVal unset intending it to be interpreted as "null". - if change.After.IsNull() { - newVal = cty.NullVal(schema.ImpliedType()) - } - - // Ideally we'd produce an error or warning here if newVal is nil and - // there are no errors in diags, because that indicates a buggy - // provider not properly reporting its result, but unfortunately many - // of our historical test mocks behave in this way and so producing - // a diagnostic here fails hundreds of tests. Instead, we must just - // silently retain the old value for now. Returning a nil value with - // no errors is still always considered a bug in the provider though, - // and should be fixed for any "real" providers that do it. - } - - var conformDiags tfdiags.Diagnostics - for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { - conformDiags = conformDiags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced an invalid value after apply for %s. 
The result cannot be saved in the Terraform state.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
- n.ProviderAddr.Provider.String(), tfdiags.FormatErrorPrefixed(err, absAddr.String()),
- ),
- ))
- }
- diags = diags.Append(conformDiags)
- if conformDiags.HasErrors() {
- // Bail early in this particular case, because an object that doesn't
- // conform to the schema can't be saved in the state anyway -- the
- // serializer will reject it.
- return nil, diags.Err()
- }
-
- // After this point we have a type-conforming result object and so we
- // must always run to completion to ensure it can be saved. If n.Error
- // is set then we must not return a non-nil error, in order to allow
- // evaluation to continue to a later point where our state object will
- // be saved.
-
- // By this point there must not be any unknown values remaining in our
- // object, because we've applied the change and we can't save unknowns
- // in our persistent state. If any are present then we will indicate an
- // error (which is always a bug in the provider) but we will also replace
- // them with nulls so that we can successfully save the portions of the
- // returned value that are known.
- if !newVal.IsWhollyKnown() {
- // To generate better error messages, we'll go for a walk through the
- // value and make a separate diagnostic for each unknown value we
- // find.
- cty.Walk(newVal, func(path cty.Path, val cty.Value) (bool, error) {
- if !val.IsKnown() {
- pathStr := tfdiags.FormatCtyPath(path)
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Provider returned invalid result object after apply",
- fmt.Sprintf(
- "After the apply operation, the provider still indicated an unknown value for %s%s. All values must be known after apply, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save the other known object values in the state.",
- n.Addr.Absolute(ctx.Path()), pathStr,
- ),
- ))
- }
- return true, nil
- })
-
- // NOTE: This operation can potentially be lossy if there are multiple
- // elements in a set that differ only by unknown values: after
- // replacing with null these will be merged together into a single set
- // element. Since we can only get here in the presence of a provider
- // bug, we accept this because storing a result here is always a
- // best-effort sort of thing.
- newVal = cty.UnknownAsNull(newVal)
- }
-
- if change.Action != plans.Delete && !diags.HasErrors() {
- // Only values that were marked as unknown in the planned value are allowed
- // to change during the apply operation. (We do this after the unknown-ness
- // check above so that we also catch anything that became unknown after
- // being known during plan.)
- //
- // If we are returning other errors anyway then we'll give this
- // a pass since the other errors are usually the explanation for
- // this one and so it's more helpful to let the user focus on the
- // root cause rather than distract with this extra problem.
- if errs := objchange.AssertObjectCompatible(schema, change.After, newVal); len(errs) > 0 {
- if resp.LegacyTypeSystem {
- // The shimming of the old type system in the legacy SDK is not precise
- // enough to pass this consistency check, so we'll give it a pass here,
- // but we will generate a warning about it so that we are more likely
- // to notice in the logs if an inconsistency beyond the type system
- // leads to a downstream provider failure.
- var buf strings.Builder
- fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.Provider.String(), absAddr)
- for _, err := range errs {
- fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err))
- }
- log.Print(buf.String())
-
- // The sort of inconsistency we won't catch here is if a known value
- // in the plan is changed during apply. That can cause downstream
- // problems because a dependent resource would make its own plan based
- // on the planned value, and thus get a different result during the
- // apply phase. This will usually lead to a "Provider produced invalid plan"
- // error that incorrectly blames the downstream resource for the change.
-
- } else {
- for _, err := range errs {
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Provider produced inconsistent result after apply",
- fmt.Sprintf(
- "When applying changes to %s, provider %q produced an unexpected new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
- absAddr, n.ProviderAddr.Provider.String(), tfdiags.FormatError(err),
- ),
- ))
- }
- }
- }
- }
-
- // If a provider returns a null or non-null object at the wrong time then
- // we still want to save that but it often causes some confusing behaviors
- // where it seems like Terraform is failing to take any action at all,
- // so we'll generate some errors to draw attention to it.
- if !diags.HasErrors() {
- if change.Action == plans.Delete && !newVal.IsNull() {
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Provider returned invalid result object after apply",
- fmt.Sprintf(
- "After applying a %s plan, the provider returned a non-null object for %s. Destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save this errant object in the state for debugging and recovery.",
- change.Action, n.Addr.Absolute(ctx.Path()),
- ),
- ))
- }
- if change.Action != plans.Delete && newVal.IsNull() {
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Provider returned invalid result object after apply",
- fmt.Sprintf(
- "After applying a %s plan, the provider returned a null object for %s. Only destroying should produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository.",
- change.Action, n.Addr.Absolute(ctx.Path()),
- ),
- ))
- }
- }
-
- newStatus := states.ObjectReady
-
- // Sometimes providers return a null value when an operation fails for some
- // reason, but we'd rather keep the prior state so that the error can be
- // corrected on a subsequent run. We must only do this when the new value
- // is null, though, or else we may discard partial updates the provider
- // was able to complete.
- if diags.HasErrors() && newVal.IsNull() {
- // Otherwise, we'll continue but using the prior state as the new value,
- // making this effectively a no-op. If the item really _has_ been
- // deleted then our next refresh will detect that and fix it up.
- // If change.Action is Create then change.Before will also be null,
- // which is fine.
- newVal = change.Before
-
- // If we're recovering the previous state, we also want to restore the
- // tainted status of the object.
- if state.Status == states.ObjectTainted { - newStatus = states.ObjectTainted - } - } - - var newState *states.ResourceInstanceObject - if !newVal.IsNull() { // null value indicates that the object is deleted, so we won't set a new state in that case - newState = &states.ResourceInstanceObject{ - Status: newStatus, - Value: newVal, - Private: resp.Private, - CreateBeforeDestroy: n.CreateBeforeDestroy, - } - } - - // Write the final state - if n.Output != nil { - *n.Output = newState - } - - if diags.HasErrors() { - // If the caller provided an error pointer then they are expected to - // handle the error some other way and we treat our own result as - // success. - if n.Error != nil { - err := diags.Err() - *n.Error = err - log.Printf("[DEBUG] %s: apply errored, but we're indicating that via the Error pointer rather than returning it: %s", n.Addr.Absolute(ctx.Path()), err) - return nil, nil - } - } - - return nil, diags.ErrWithWarnings() -} - -// EvalApplyPre is an EvalNode implementation that does the pre-Apply work -type EvalApplyPre struct { - Addr addrs.ResourceInstance - Gen states.Generation - State **states.ResourceInstanceObject - Change **plans.ResourceInstanceChange -} - -// TODO: test -func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { - change := *n.Change - absAddr := n.Addr.Absolute(ctx.Path()) - - if change == nil { - panic(fmt.Sprintf("EvalApplyPre for %s called with nil Change", absAddr)) - } - - if resourceHasUserVisibleApply(n.Addr) { - priorState := change.Before - plannedNewState := change.After - - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreApply(absAddr, n.Gen, change.Action, priorState, plannedNewState) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// EvalApplyPost is an EvalNode implementation that does the post-Apply work -type EvalApplyPost struct { - Addr addrs.ResourceInstance - Gen states.Generation - State **states.ResourceInstanceObject - Error *error -} - -// TODO: test -func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - - if resourceHasUserVisibleApply(n.Addr) { - absAddr := n.Addr.Absolute(ctx.Path()) - var newState cty.Value - if state != nil { - newState = state.Value - } else { - newState = cty.NullVal(cty.DynamicPseudoType) - } - var err error - if n.Error != nil { - err = *n.Error - } - - hookErr := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostApply(absAddr, n.Gen, newState, err) - }) - if hookErr != nil { - return nil, hookErr - } - } - - return nil, *n.Error -} - -// EvalMaybeTainted is an EvalNode that takes the planned change, new value, -// and possible error from an apply operation and produces a new instance -// object marked as tainted if it appears that a create operation has failed. -// -// This EvalNode never returns an error, to ensure that a subsequent EvalNode -// can still record the possibly-tainted object in the state. 
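The taint rule described in this comment reduces to a small pure function. A minimal sketch with invented names, assuming only what the comment states: a failed create taints the object, while a failed update leaves the current status alone:

package main

import (
	"errors"
	"fmt"
)

type objStatus int

const (
	statusReady objStatus = iota
	statusTainted
)

// maybeTaint mirrors the rule above: only a failed *create* taints the
// object, because a failed update may not have changed the remote object
// at all, and an already-tainted object stays tainted.
func maybeTaint(current objStatus, isCreate bool, applyErr error) objStatus {
	if applyErr == nil || !isCreate {
		return current
	}
	return statusTainted
}

func main() {
	err := errors.New("provider failed")
	fmt.Println(maybeTaint(statusReady, true, err) == statusTainted)  // true
	fmt.Println(maybeTaint(statusReady, false, err) == statusTainted) // false
}
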
-type EvalMaybeTainted struct { - Addr addrs.ResourceInstance - Gen states.Generation - Change **plans.ResourceInstanceChange - State **states.ResourceInstanceObject - Error *error -} - -func (n *EvalMaybeTainted) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil || n.Change == nil || n.Error == nil { - return nil, nil - } - - state := *n.State - change := *n.Change - err := *n.Error - - // nothing to do if everything went as planned - if err == nil { - return nil, nil - } - - if state != nil && state.Status == states.ObjectTainted { - log.Printf("[TRACE] EvalMaybeTainted: %s was already tainted, so nothing to do", n.Addr.Absolute(ctx.Path())) - return nil, nil - } - - if change.Action == plans.Create { - // If there are errors during a _create_ then the object is - // in an undefined state, and so we'll mark it as tainted so - // we can try again on the next run. - // - // We don't do this for other change actions because errors - // during updates will often not change the remote object at all. - // If there _were_ changes prior to the error, it's the provider's - // responsibility to record the effect of those changes in the - // object value it returned. - log.Printf("[TRACE] EvalMaybeTainted: %s encountered an error during creation, so it is now marked as tainted", n.Addr.Absolute(ctx.Path())) - *n.State = state.AsTainted() - } - - return nil, nil -} - -// resourceHasUserVisibleApply returns true if the given resource is one where -// apply actions should be exposed to the user. -// -// Certain resources do apply actions only as an implementation detail, so -// these should not be advertised to code outside of this package. -func resourceHasUserVisibleApply(addr addrs.ResourceInstance) bool { - // Only managed resources have user-visible apply actions. - // In particular, this excludes data resources since we "apply" these - // only as an implementation detail of removing them from state when - // they are destroyed. (When reading, they don't get here at all because - // we present them as "Refresh" actions.) - return addr.ContainingResource().Mode == addrs.ManagedResourceMode -} - -// EvalApplyProvisioners is an EvalNode implementation that executes -// the provisioners for a resource. -// -// TODO(mitchellh): This should probably be split up into a more fine-grained -// ApplyProvisioner (single) that is looped over. -type EvalApplyProvisioners struct { - Addr addrs.ResourceInstance - State **states.ResourceInstanceObject - ResourceConfig *configs.Resource - CreateNew *bool - Error *error - - // When is the type of provisioner to run at this point - When configs.ProvisionerWhen -} - -// TODO: test -func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - state := *n.State - if state == nil { - log.Printf("[TRACE] EvalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr) - return nil, nil - } - if n.When == configs.ProvisionerWhenCreate && n.CreateNew != nil && !*n.CreateNew { - // If we're not creating a new resource, then don't run provisioners - log.Printf("[TRACE] EvalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr) - return nil, nil - } - if state.Status == states.ObjectTainted { - // No point in provisioning an object that is already tainted, since - // it's going to get recreated on the next apply anyway. 
- log.Printf("[TRACE] EvalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr) - return nil, nil - } - - provs := n.filterProvisioners() - if len(provs) == 0 { - // We have no provisioners, so don't do anything - return nil, nil - } - - if n.Error != nil && *n.Error != nil { - // We're already tainted, so just return out - return nil, nil - } - - { - // Call pre hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreProvisionInstance(absAddr, state.Value) - }) - if err != nil { - return nil, err - } - } - - // If there are no errors, then we append it to our output error - // if we have one, otherwise we just output it. - err := n.apply(ctx, provs) - if err != nil { - *n.Error = multierror.Append(*n.Error, err) - if n.Error == nil { - return nil, err - } else { - log.Printf("[TRACE] EvalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", absAddr) - return nil, nil - } - } - - { - // Call post hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostProvisionInstance(absAddr, state.Value) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// filterProvisioners filters the provisioners on the resource to only -// the provisioners specified by the "when" option. -func (n *EvalApplyProvisioners) filterProvisioners() []*configs.Provisioner { - // Fast path the zero case - if n.ResourceConfig == nil || n.ResourceConfig.Managed == nil { - return nil - } - - if len(n.ResourceConfig.Managed.Provisioners) == 0 { - return nil - } - - result := make([]*configs.Provisioner, 0, len(n.ResourceConfig.Managed.Provisioners)) - for _, p := range n.ResourceConfig.Managed.Provisioners { - if p.When == n.When { - result = append(result, p) - } - } - - return result -} - -func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisioner) error { - var diags tfdiags.Diagnostics - instanceAddr := n.Addr - absAddr := instanceAddr.Absolute(ctx.Path()) - - // If there's a connection block defined directly inside the resource block - // then it'll serve as a base connection configuration for all of the - // provisioners. - var baseConn hcl.Body - if n.ResourceConfig.Managed != nil && n.ResourceConfig.Managed.Connection != nil { - baseConn = n.ResourceConfig.Managed.Connection.Config - } - - for _, prov := range provs { - log.Printf("[TRACE] EvalApplyProvisioners: provisioning %s with %q", absAddr, prov.Type) - - // Get the provisioner - provisioner := ctx.Provisioner(prov.Type) - schema := ctx.ProvisionerSchema(prov.Type) - - var forEach map[string]cty.Value - - // For a destroy-time provisioner forEach is intentionally nil here, - // which EvalDataForInstanceKey responds to by not populating EachValue - // in its result. That's okay because each.value is prohibited for - // destroy-time provisioners. - if n.When != configs.ProvisionerWhenDestroy { - m, forEachDiags := evaluateForEachExpression(n.ResourceConfig.ForEach, ctx) - diags = diags.Append(forEachDiags) - forEach = m - } - - keyData := EvalDataForInstanceKey(instanceAddr.Key, forEach) - - // Evaluate the main provisioner configuration. - config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData) - diags = diags.Append(configDiags) - - // we can't apply the provisioner if the config has errors - if diags.HasErrors() { - return diags.Err() - } - - // If the provisioner block contains a connection block of its own then - // it can override the base connection configuration, if any. 
- var localConn hcl.Body
- if prov.Connection != nil {
- localConn = prov.Connection.Config
- }
-
- var connBody hcl.Body
- switch {
- case baseConn != nil && localConn != nil:
- // Our standard merging logic applies here, similar to what we do
- // with _override.tf configuration files: arguments from the
- // base connection block will be masked by any arguments of the
- // same name in the local connection block.
- connBody = configs.MergeBodies(baseConn, localConn)
- case baseConn != nil:
- connBody = baseConn
- case localConn != nil:
- connBody = localConn
- }
-
- // start with an empty connInfo
- connInfo := cty.NullVal(connectionBlockSupersetSchema.ImpliedType())
-
- if connBody != nil {
- var connInfoDiags tfdiags.Diagnostics
- connInfo, _, connInfoDiags = ctx.EvaluateBlock(connBody, connectionBlockSupersetSchema, instanceAddr, keyData)
- diags = diags.Append(connInfoDiags)
- if diags.HasErrors() {
- // "on failure continue" setting only applies to failures of the
- // provisioner itself, not to invalid configuration.
- return diags.Err()
- }
- }
-
- {
- // Call pre hook
- err := ctx.Hook(func(h Hook) (HookAction, error) {
- return h.PreProvisionInstanceStep(absAddr, prov.Type)
- })
- if err != nil {
- return err
- }
- }
-
- // The output function
- outputFn := func(msg string) {
- ctx.Hook(func(h Hook) (HookAction, error) {
- h.ProvisionOutput(absAddr, prov.Type, msg)
- return HookActionContinue, nil
- })
- }
-
- output := CallbackUIOutput{OutputFn: outputFn}
- resp := provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{
- Config: config,
- Connection: connInfo,
- UIOutput: &output,
- })
- applyDiags := resp.Diagnostics.InConfigBody(prov.Config)
-
- // Call post hook
- hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
- return h.PostProvisionInstanceStep(absAddr, prov.Type, applyDiags.Err())
- })
-
- switch prov.OnFailure {
- case configs.ProvisionerOnFailureContinue:
- if applyDiags.HasErrors() {
- log.Printf("[WARN] Errors while provisioning %s with %q, but continuing as requested in configuration", n.Addr, prov.Type)
- } else {
- // Maybe there are warnings that we still want to see
- diags = diags.Append(applyDiags)
- }
- default:
- diags = diags.Append(applyDiags)
- if applyDiags.HasErrors() {
- log.Printf("[WARN] Errors while provisioning %s with %q, so aborting", n.Addr, prov.Type)
- return diags.Err()
- }
- }
-
- // Deal with the hook
- if hookErr != nil {
- return hookErr
- }
- }
-
- return diags.ErrWithWarnings()
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
deleted file mode 100644
index 3272a8e8..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package terraform
-
-import (
- "fmt"
-
- "github.com/hashicorp/terraform/plans"
-
- "github.com/hashicorp/hcl/v2"
-
- "github.com/hashicorp/terraform/addrs"
- "github.com/hashicorp/terraform/configs"
- "github.com/hashicorp/terraform/tfdiags"
-)
-
-// EvalCheckPreventDestroy is an EvalNode implementation that returns an
-// error if a resource has PreventDestroy configured and the diff
-// would destroy the resource.
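The check below boils down to a simple predicate over the plan action. A standalone sketch with invented names; the real implementation operates on plans.Action values and emits an HCL diagnostic instead of a boolean:

package main

import "fmt"

type planAction int

const (
	actionCreate planAction = iota
	actionUpdate
	actionDelete
	actionReplace
)

// blockedByPreventDestroy mirrors the rule below: a plan is rejected when
// it would delete or replace an instance whose configuration sets
// lifecycle.prevent_destroy.
func blockedByPreventDestroy(a planAction, preventDestroy bool) bool {
	return preventDestroy && (a == actionDelete || a == actionReplace)
}

func main() {
	fmt.Println(blockedByPreventDestroy(actionDelete, true))  // true
	fmt.Println(blockedByPreventDestroy(actionUpdate, true))  // false
	fmt.Println(blockedByPreventDestroy(actionDelete, false)) // false
}
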
-type EvalCheckPreventDestroy struct { - Addr addrs.ResourceInstance - Config *configs.Resource - Change **plans.ResourceInstanceChange -} - -func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) { - if n.Change == nil || *n.Change == nil || n.Config == nil || n.Config.Managed == nil { - return nil, nil - } - - change := *n.Change - preventDestroy := n.Config.Managed.PreventDestroy - - if (change.Action == plans.Delete || change.Action.IsReplace()) && preventDestroy { - var diags tfdiags.Diagnostics - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Instance cannot be destroyed", - Detail: fmt.Sprintf( - "Resource %s has lifecycle.prevent_destroy set, but the plan calls for this resource to be destroyed. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or reduce the scope of the plan using the -target flag.", - n.Addr.Absolute(ctx.Path()).String(), - ), - Subject: &n.Config.DeclRange, - }) - return nil, diags.Err() - } - - return nil, nil -} - -const preventDestroyErrStr = `%s: the plan would destroy this resource, but it currently has lifecycle.prevent_destroy set to true. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or adjust the scope of the plan using the -target flag.` diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go deleted file mode 100644 index 241662f7..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go +++ /dev/null @@ -1,168 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/instances" - "github.com/hashicorp/terraform/lang" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// EvalContext is the interface that is given to eval nodes to execute. -type EvalContext interface { - // Stopped returns a channel that is closed when evaluation is stopped - // via Terraform.Context.Stop() - Stopped() <-chan struct{} - - // Path is the current module path. - Path() addrs.ModuleInstance - - // Hook is used to call hook methods. The callback is called for each - // hook and should return the hook action to take and the error. - Hook(func(Hook) (HookAction, error)) error - - // Input is the UIInput object for interacting with the UI. - Input() UIInput - - // InitProvider initializes the provider with the given address, and returns - // the implementation of the resource provider or an error. - // - // It is an error to initialize the same provider more than once. This - // method will panic if the module instance address of the given provider - // configuration does not match the Path() of the EvalContext. - InitProvider(addr addrs.AbsProviderConfig) (providers.Interface, error) - - // Provider gets the provider instance with the given address (already - // initialized) or returns nil if the provider isn't initialized. - // - // This method expects an _absolute_ provider configuration address, since - // resources in one module are able to use providers from other modules. 
- // InitProvider must've been called on the EvalContext of the module - // that owns the given provider before calling this method. - Provider(addrs.AbsProviderConfig) providers.Interface - - // ProviderSchema retrieves the schema for a particular provider, which - // must have already been initialized with InitProvider. - // - // This method expects an _absolute_ provider configuration address, since - // resources in one module are able to use providers from other modules. - ProviderSchema(addrs.AbsProviderConfig) *ProviderSchema - - // CloseProvider closes provider connections that aren't needed anymore. - // - // This method will panic if the module instance address of the given - // provider configuration does not match the Path() of the EvalContext. - CloseProvider(addrs.AbsProviderConfig) error - - // ConfigureProvider configures the provider with the given - // configuration. This is a separate context call because this call - // is used to store the provider configuration for inheritance lookups - // with ParentProviderConfig(). - // - // This method will panic if the module instance address of the given - // provider configuration does not match the Path() of the EvalContext. - ConfigureProvider(addrs.AbsProviderConfig, cty.Value) tfdiags.Diagnostics - - // ProviderInput and SetProviderInput are used to configure providers - // from user input. - // - // These methods will panic if the module instance address of the given - // provider configuration does not match the Path() of the EvalContext. - ProviderInput(addrs.AbsProviderConfig) map[string]cty.Value - SetProviderInput(addrs.AbsProviderConfig, map[string]cty.Value) - - // InitProvisioner initializes the provisioner with the given name and - // returns the implementation of the resource provisioner or an error. - // - // It is an error to initialize the same provisioner more than once. - InitProvisioner(string) (provisioners.Interface, error) - - // Provisioner gets the provisioner instance with the given name (already - // initialized) or returns nil if the provisioner isn't initialized. - Provisioner(string) provisioners.Interface - - // ProvisionerSchema retrieves the main configuration schema for a - // particular provisioner, which must have already been initialized with - // InitProvisioner. - ProvisionerSchema(string) *configschema.Block - - // CloseProvisioner closes provisioner connections that aren't needed - // anymore. - CloseProvisioner(string) error - - // EvaluateBlock takes the given raw configuration block and associated - // schema and evaluates it to produce a value of an object type that - // conforms to the implied type of the schema. - // - // The "self" argument is optional. If given, it is the referenceable - // address that the name "self" should behave as an alias for when - // evaluating. Set this to nil if the "self" object should not be available. - // - // The "key" argument is also optional. If given, it is the instance key - // of the current object within the multi-instance container it belongs - // to. For example, on a resource block with "count" set this should be - // set to a different addrs.IntKey for each instance created from that - // block. Set this to addrs.NoKey if not appropriate. - // - // The returned body is an expanded version of the given body, with any - // "dynamic" blocks replaced with zero or more static blocks. This can be - // used to extract correct source location information about attributes of - // the returned object value. 
- EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) - - // EvaluateExpr takes the given HCL expression and evaluates it to produce - // a value. - // - // The "self" argument is optional. If given, it is the referenceable - // address that the name "self" should behave as an alias for when - // evaluating. Set this to nil if the "self" object should not be available. - EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) - - // EvaluationScope returns a scope that can be used to evaluate reference - // addresses in this context. - EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope - - // SetModuleCallArguments defines values for the variables of a particular - // child module call. - // - // Calling this function multiple times has merging behavior, keeping any - // previously-set keys that are not present in the new map. - SetModuleCallArguments(addrs.ModuleCallInstance, map[string]cty.Value) - - // GetVariableValue returns the value provided for the input variable with - // the given address, or cty.DynamicVal if the variable hasn't been assigned - // a value yet. - // - // Most callers should deal with variable values only indirectly via - // EvaluationScope and the other expression evaluation functions, but - // this is provided because variables tend to be evaluated outside of - // the context of the module they belong to and so we sometimes need to - // override the normal expression evaluation behavior. - GetVariableValue(addr addrs.AbsInputVariableInstance) cty.Value - - // Changes returns the writer object that can be used to write new proposed - // changes into the global changes set. - Changes() *plans.ChangesSync - - // State returns a wrapper object that provides safe concurrent access to - // the global state. - State() *states.SyncState - - // InstanceExpander returns a helper object for tracking the expansion of - // graph nodes during the plan phase in response to "count" and "for_each" - // arguments. - // - // The InstanceExpander is a global object that is shared across all of the - // EvalContext objects for a given configuration. - InstanceExpander() *instances.Expander - - // WithPath returns a copy of the context with the internal path set to the - // path argument. - WithPath(path addrs.ModuleInstance) EvalContext -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go deleted file mode 100644 index 4c2ff128..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go +++ /dev/null @@ -1,355 +0,0 @@ -package terraform - -import ( - "context" - "fmt" - "log" - "sync" - - "github.com/hashicorp/terraform/instances" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/version" - - "github.com/hashicorp/terraform/states" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/lang" - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/terraform/addrs" - "github.com/zclconf/go-cty/cty" -) - -// BuiltinEvalContext is an EvalContext implementation that is used by -// Terraform by default. 
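The WithPath method of this type copies the context shallowly, so per-run caches stay shared between the copies while the module path is replaced. A minimal sketch of that pattern, with invented names:

package main

import "fmt"

type evalCtx struct {
	path    []string
	pathSet bool
	cache   map[string]int // shared across withPath copies
}

// withPath returns a shallow copy with a new path: the cache map header is
// copied, so both contexts alias the same underlying map.
func (c *evalCtx) withPath(p []string) *evalCtx {
	newC := *c // shallow copy
	newC.path = p
	newC.pathSet = true
	return &newC
}

func main() {
	root := &evalCtx{cache: map[string]int{}}
	child := root.withPath([]string{"module", "net"})
	child.cache["hits"]++
	fmt.Println(root.cache["hits"], child.path) // 1 [module net]
}
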
-type BuiltinEvalContext struct { - // StopContext is the context used to track whether we're complete - StopContext context.Context - - // PathValue is the Path that this context is operating within. - PathValue addrs.ModuleInstance - - // pathSet indicates that this context was explicitly created for a - // specific path, and can be safely used for evaluation. This lets us - // differentiate between PathValue being unset, and the zero value which is - // equivalent to RootModuleInstance. Path and Evaluation methods will - // panic if this is not set. - pathSet bool - - // Evaluator is used for evaluating expressions within the scope of this - // eval context. - Evaluator *Evaluator - - // Schemas is a repository of all of the schemas we should need to - // decode configuration blocks and expressions. This must be constructed by - // the caller to include schemas for all of the providers, resource types, - // data sources and provisioners used by the given configuration and - // state. - // - // This must not be mutated during evaluation. - Schemas *Schemas - - // VariableValues contains the variable values across all modules. This - // structure is shared across the entire containing context, and so it - // may be accessed only when holding VariableValuesLock. - // The keys of the first level of VariableValues are the string - // representations of addrs.ModuleInstance values. The second-level keys - // are variable names within each module instance. - VariableValues map[string]map[string]cty.Value - VariableValuesLock *sync.Mutex - - Components contextComponentFactory - Hooks []Hook - InputValue UIInput - ProviderCache map[string]providers.Interface - ProviderInputConfig map[string]map[string]cty.Value - ProviderLock *sync.Mutex - ProvisionerCache map[string]provisioners.Interface - ProvisionerLock *sync.Mutex - ChangesValue *plans.ChangesSync - StateValue *states.SyncState - InstanceExpanderValue *instances.Expander -} - -// BuiltinEvalContext implements EvalContext -var _ EvalContext = (*BuiltinEvalContext)(nil) - -func (ctx *BuiltinEvalContext) WithPath(path addrs.ModuleInstance) EvalContext { - ctx.pathSet = true - newCtx := *ctx - newCtx.PathValue = path - return &newCtx -} - -func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} { - // This can happen during tests. During tests, we just block forever. - if ctx.StopContext == nil { - return nil - } - - return ctx.StopContext.Done() -} - -func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error { - for _, h := range ctx.Hooks { - action, err := fn(h) - if err != nil { - return err - } - - switch action { - case HookActionContinue: - continue - case HookActionHalt: - // Return an early exit error to trigger an early exit - log.Printf("[WARN] Early exit triggered by hook: %T", h) - return EvalEarlyExitError{} - } - } - - return nil -} - -func (ctx *BuiltinEvalContext) Input() UIInput { - return ctx.InputValue -} - -func (ctx *BuiltinEvalContext) InitProvider(addr addrs.AbsProviderConfig) (providers.Interface, error) { - // If we already initialized, it is an error - if p := ctx.Provider(addr); p != nil { - return nil, fmt.Errorf("%s is already initialized", addr) - } - - // Warning: make sure to acquire these locks AFTER the call to Provider - // above, since it also acquires locks. 
- ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - key := addr.String() - - p, err := ctx.Components.ResourceProvider(addr.Provider) - if err != nil { - return nil, err - } - - log.Printf("[TRACE] BuiltinEvalContext: Initialized %q provider for %s", addr.String(), addr) - ctx.ProviderCache[key] = p - - return p, nil -} - -func (ctx *BuiltinEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - return ctx.ProviderCache[addr.String()] -} - -func (ctx *BuiltinEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { - return ctx.Schemas.ProviderSchema(addr.Provider) -} - -func (ctx *BuiltinEvalContext) CloseProvider(addr addrs.AbsProviderConfig) error { - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - key := addr.String() - provider := ctx.ProviderCache[key] - if provider != nil { - delete(ctx.ProviderCache, key) - return provider.Close() - } - - return nil -} - -func (ctx *BuiltinEvalContext) ConfigureProvider(addr addrs.AbsProviderConfig, cfg cty.Value) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - if !addr.Module.Equal(ctx.Path().Module()) { - // This indicates incorrect use of ConfigureProvider: it should be used - // only from the module that the provider configuration belongs to. - panic(fmt.Sprintf("%s configured by wrong module %s", addr, ctx.Path())) - } - - p := ctx.Provider(addr) - if p == nil { - diags = diags.Append(fmt.Errorf("%s not initialized", addr)) - return diags - } - - providerSchema := ctx.ProviderSchema(addr) - if providerSchema == nil { - diags = diags.Append(fmt.Errorf("schema for %s is not available", addr)) - return diags - } - - req := providers.ConfigureRequest{ - TerraformVersion: version.String(), - Config: cfg, - } - - resp := p.Configure(req) - return resp.Diagnostics -} - -func (ctx *BuiltinEvalContext) ProviderInput(pc addrs.AbsProviderConfig) map[string]cty.Value { - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - if !pc.Module.Equal(ctx.Path().Module()) { - // This indicates incorrect use of InitProvider: it should be used - // only from the module that the provider configuration belongs to. - panic(fmt.Sprintf("%s initialized by wrong module %s", pc, ctx.Path())) - } - - if !ctx.Path().IsRoot() { - // Only root module provider configurations can have input. - return nil - } - - return ctx.ProviderInputConfig[pc.String()] -} - -func (ctx *BuiltinEvalContext) SetProviderInput(pc addrs.AbsProviderConfig, c map[string]cty.Value) { - absProvider := pc - if !pc.Module.IsRoot() { - // Only root module provider configurations can have input. - log.Printf("[WARN] BuiltinEvalContext: attempt to SetProviderInput for non-root module") - return - } - - // Save the configuration - ctx.ProviderLock.Lock() - ctx.ProviderInputConfig[absProvider.String()] = c - ctx.ProviderLock.Unlock() -} - -func (ctx *BuiltinEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { - // If we already initialized, it is an error - if p := ctx.Provisioner(n); p != nil { - return nil, fmt.Errorf("Provisioner '%s' already initialized", n) - } - - // Warning: make sure to acquire these locks AFTER the call to Provisioner - // above, since it also acquires locks. 
- ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - p, err := ctx.Components.ResourceProvisioner(n) - if err != nil { - return nil, err - } - - ctx.ProvisionerCache[n] = p - - return p, nil -} - -func (ctx *BuiltinEvalContext) Provisioner(n string) provisioners.Interface { - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - return ctx.ProvisionerCache[n] -} - -func (ctx *BuiltinEvalContext) ProvisionerSchema(n string) *configschema.Block { - return ctx.Schemas.ProvisionerConfig(n) -} - -func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - prov := ctx.ProvisionerCache[n] - if prov != nil { - return prov.Close() - } - - return nil -} - -func (ctx *BuiltinEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - scope := ctx.EvaluationScope(self, keyData) - body, evalDiags := scope.ExpandBlock(body, schema) - diags = diags.Append(evalDiags) - val, evalDiags := scope.EvalBlock(body, schema) - diags = diags.Append(evalDiags) - return val, body, diags -} - -func (ctx *BuiltinEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { - scope := ctx.EvaluationScope(self, EvalDataForNoInstanceKey) - return scope.EvalExpr(expr, wantType) -} - -func (ctx *BuiltinEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { - if !ctx.pathSet { - panic("context path not set") - } - data := &evaluationStateData{ - Evaluator: ctx.Evaluator, - ModulePath: ctx.PathValue, - InstanceKeyData: keyData, - Operation: ctx.Evaluator.Operation, - } - return ctx.Evaluator.Scope(data, self) -} - -func (ctx *BuiltinEvalContext) Path() addrs.ModuleInstance { - if !ctx.pathSet { - panic("context path not set") - } - return ctx.PathValue -} - -func (ctx *BuiltinEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, vals map[string]cty.Value) { - ctx.VariableValuesLock.Lock() - defer ctx.VariableValuesLock.Unlock() - - if !ctx.pathSet { - panic("context path not set") - } - - childPath := n.ModuleInstance(ctx.PathValue) - key := childPath.String() - - args := ctx.VariableValues[key] - if args == nil { - args = make(map[string]cty.Value) - ctx.VariableValues[key] = vals - return - } - - for k, v := range vals { - args[k] = v - } -} - -func (ctx *BuiltinEvalContext) GetVariableValue(addr addrs.AbsInputVariableInstance) cty.Value { - ctx.VariableValuesLock.Lock() - defer ctx.VariableValuesLock.Unlock() - - modKey := addr.Module.String() - modVars := ctx.VariableValues[modKey] - val, ok := modVars[addr.Variable.Name] - if !ok { - return cty.DynamicVal - } - return val -} - -func (ctx *BuiltinEvalContext) Changes() *plans.ChangesSync { - return ctx.ChangesValue -} - -func (ctx *BuiltinEvalContext) State() *states.SyncState { - return ctx.StateValue -} - -func (ctx *BuiltinEvalContext) InstanceExpander() *instances.Expander { - return ctx.InstanceExpanderValue -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go deleted file mode 100644 index ecb01d64..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go +++ /dev/null @@ -1,344 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/hcl/v2" - 
"github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/instances" - "github.com/hashicorp/terraform/lang" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// MockEvalContext is a mock version of EvalContext that can be used -// for tests. -type MockEvalContext struct { - StoppedCalled bool - StoppedValue <-chan struct{} - - HookCalled bool - HookHook Hook - HookError error - - InputCalled bool - InputInput UIInput - - InitProviderCalled bool - InitProviderType string - InitProviderAddr addrs.AbsProviderConfig - InitProviderProvider providers.Interface - InitProviderError error - - ProviderCalled bool - ProviderAddr addrs.AbsProviderConfig - ProviderProvider providers.Interface - - ProviderSchemaCalled bool - ProviderSchemaAddr addrs.AbsProviderConfig - ProviderSchemaSchema *ProviderSchema - - CloseProviderCalled bool - CloseProviderAddr addrs.AbsProviderConfig - CloseProviderProvider providers.Interface - - ProviderInputCalled bool - ProviderInputAddr addrs.AbsProviderConfig - ProviderInputValues map[string]cty.Value - - SetProviderInputCalled bool - SetProviderInputAddr addrs.AbsProviderConfig - SetProviderInputValues map[string]cty.Value - - ConfigureProviderCalled bool - ConfigureProviderAddr addrs.AbsProviderConfig - ConfigureProviderConfig cty.Value - ConfigureProviderDiags tfdiags.Diagnostics - - InitProvisionerCalled bool - InitProvisionerName string - InitProvisionerProvisioner provisioners.Interface - InitProvisionerError error - - ProvisionerCalled bool - ProvisionerName string - ProvisionerProvisioner provisioners.Interface - - ProvisionerSchemaCalled bool - ProvisionerSchemaName string - ProvisionerSchemaSchema *configschema.Block - - CloseProvisionerCalled bool - CloseProvisionerName string - CloseProvisionerProvisioner provisioners.Interface - - EvaluateBlockCalled bool - EvaluateBlockBody hcl.Body - EvaluateBlockSchema *configschema.Block - EvaluateBlockSelf addrs.Referenceable - EvaluateBlockKeyData InstanceKeyEvalData - EvaluateBlockResultFunc func( - body hcl.Body, - schema *configschema.Block, - self addrs.Referenceable, - keyData InstanceKeyEvalData, - ) (cty.Value, hcl.Body, tfdiags.Diagnostics) // overrides the other values below, if set - EvaluateBlockResult cty.Value - EvaluateBlockExpandedBody hcl.Body - EvaluateBlockDiags tfdiags.Diagnostics - - EvaluateExprCalled bool - EvaluateExprExpr hcl.Expression - EvaluateExprWantType cty.Type - EvaluateExprSelf addrs.Referenceable - EvaluateExprResultFunc func( - expr hcl.Expression, - wantType cty.Type, - self addrs.Referenceable, - ) (cty.Value, tfdiags.Diagnostics) // overrides the other values below, if set - EvaluateExprResult cty.Value - EvaluateExprDiags tfdiags.Diagnostics - - EvaluationScopeCalled bool - EvaluationScopeSelf addrs.Referenceable - EvaluationScopeKeyData InstanceKeyEvalData - EvaluationScopeScope *lang.Scope - - PathCalled bool - PathPath addrs.ModuleInstance - - SetModuleCallArgumentsCalled bool - SetModuleCallArgumentsModule addrs.ModuleCallInstance - SetModuleCallArgumentsValues map[string]cty.Value - - GetVariableValueCalled bool - GetVariableValueAddr addrs.AbsInputVariableInstance - GetVariableValueValue cty.Value - - ChangesCalled bool 
- ChangesChanges *plans.ChangesSync - - StateCalled bool - StateState *states.SyncState - - InstanceExpanderCalled bool - InstanceExpanderExpander *instances.Expander -} - -// MockEvalContext implements EvalContext -var _ EvalContext = (*MockEvalContext)(nil) - -func (c *MockEvalContext) Stopped() <-chan struct{} { - c.StoppedCalled = true - return c.StoppedValue -} - -func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error { - c.HookCalled = true - if c.HookHook != nil { - if _, err := fn(c.HookHook); err != nil { - return err - } - } - - return c.HookError -} - -func (c *MockEvalContext) Input() UIInput { - c.InputCalled = true - return c.InputInput -} - -func (c *MockEvalContext) InitProvider(addr addrs.AbsProviderConfig) (providers.Interface, error) { - c.InitProviderCalled = true - c.InitProviderType = addr.String() - c.InitProviderAddr = addr - return c.InitProviderProvider, c.InitProviderError -} - -func (c *MockEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { - c.ProviderCalled = true - c.ProviderAddr = addr - return c.ProviderProvider -} - -func (c *MockEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { - c.ProviderSchemaCalled = true - c.ProviderSchemaAddr = addr - return c.ProviderSchemaSchema -} - -func (c *MockEvalContext) CloseProvider(addr addrs.AbsProviderConfig) error { - c.CloseProviderCalled = true - c.CloseProviderAddr = addr - return nil -} - -func (c *MockEvalContext) ConfigureProvider(addr addrs.AbsProviderConfig, cfg cty.Value) tfdiags.Diagnostics { - c.ConfigureProviderCalled = true - c.ConfigureProviderAddr = addr - c.ConfigureProviderConfig = cfg - return c.ConfigureProviderDiags -} - -func (c *MockEvalContext) ProviderInput(addr addrs.AbsProviderConfig) map[string]cty.Value { - c.ProviderInputCalled = true - c.ProviderInputAddr = addr - return c.ProviderInputValues -} - -func (c *MockEvalContext) SetProviderInput(addr addrs.AbsProviderConfig, vals map[string]cty.Value) { - c.SetProviderInputCalled = true - c.SetProviderInputAddr = addr - c.SetProviderInputValues = vals -} - -func (c *MockEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { - c.InitProvisionerCalled = true - c.InitProvisionerName = n - return c.InitProvisionerProvisioner, c.InitProvisionerError -} - -func (c *MockEvalContext) Provisioner(n string) provisioners.Interface { - c.ProvisionerCalled = true - c.ProvisionerName = n - return c.ProvisionerProvisioner -} - -func (c *MockEvalContext) ProvisionerSchema(n string) *configschema.Block { - c.ProvisionerSchemaCalled = true - c.ProvisionerSchemaName = n - return c.ProvisionerSchemaSchema -} - -func (c *MockEvalContext) CloseProvisioner(n string) error { - c.CloseProvisionerCalled = true - c.CloseProvisionerName = n - return nil -} - -func (c *MockEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - c.EvaluateBlockCalled = true - c.EvaluateBlockBody = body - c.EvaluateBlockSchema = schema - c.EvaluateBlockSelf = self - c.EvaluateBlockKeyData = keyData - if c.EvaluateBlockResultFunc != nil { - return c.EvaluateBlockResultFunc(body, schema, self, keyData) - } - return c.EvaluateBlockResult, c.EvaluateBlockExpandedBody, c.EvaluateBlockDiags -} - -func (c *MockEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { - c.EvaluateExprCalled = true - c.EvaluateExprExpr = expr - 
c.EvaluateExprWantType = wantType
-	c.EvaluateExprSelf = self
-	if c.EvaluateExprResultFunc != nil {
-		return c.EvaluateExprResultFunc(expr, wantType, self)
-	}
-	return c.EvaluateExprResult, c.EvaluateExprDiags
-}
-
-// installSimpleEval is a helper to install a simple mock implementation of
-// both EvaluateBlock and EvaluateExpr into the receiver.
-//
-// These default implementations evaluate the given input against the scope
-// in field EvaluationScopeScope or, if that is nil, with no eval context at
-// all, so that only constant values may be used.
-//
-// This function overwrites any existing functions installed in fields
-// EvaluateBlockResultFunc and EvaluateExprResultFunc.
-func (c *MockEvalContext) installSimpleEval() {
-	c.EvaluateBlockResultFunc = func(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) {
-		if scope := c.EvaluationScopeScope; scope != nil {
-			// Fully-functional codepath.
-			var diags tfdiags.Diagnostics
-			body, diags = scope.ExpandBlock(body, schema)
-			if diags.HasErrors() {
-				return cty.DynamicVal, body, diags
-			}
-			val, evalDiags := scope.EvalBlock(body, schema)
-			diags = diags.Append(evalDiags)
-			if evalDiags.HasErrors() {
-				return cty.DynamicVal, body, diags
-			}
-			return val, body, diags
-		}
-
-		// Fallback codepath supporting constant values only.
-		val, hclDiags := hcldec.Decode(body, schema.DecoderSpec(), nil)
-		return val, body, tfdiags.Diagnostics(nil).Append(hclDiags)
-	}
-	c.EvaluateExprResultFunc = func(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) {
-		if scope := c.EvaluationScopeScope; scope != nil {
-			// Fully-functional codepath.
-			return scope.EvalExpr(expr, wantType)
-		}
-
-		// Fallback codepath supporting constant values only.
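What "constant values only" means here: with a nil *hcl.EvalContext, expr.Value can resolve literals but reports diagnostics for anything that needs variables or functions. A rough standalone sketch using the hclsyntax parser vendored in this tree (illustrative, not part of this file):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func eval(src string) {
	expr, _ := hclsyntax.ParseExpression([]byte(src), "example.hcl", hcl.InitialPos)
	val, diags := expr.Value(nil) // nil EvalContext: constants only
	fmt.Printf("%s => %s (errors: %v)\n", src, val.GoString(), diags.HasErrors())
}

func main() {
	eval(`"literal"`)  // succeeds: needs no references
	eval(`upper("a")`) // fails: function calls require an EvalContext
	eval(`var.x`)      // fails: variables require an EvalContext
}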
- var diags tfdiags.Diagnostics - val, hclDiags := expr.Value(nil) - diags = diags.Append(hclDiags) - if hclDiags.HasErrors() { - return cty.DynamicVal, diags - } - var err error - val, err = convert.Convert(val, wantType) - if err != nil { - diags = diags.Append(err) - return cty.DynamicVal, diags - } - return val, diags - } -} - -func (c *MockEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { - c.EvaluationScopeCalled = true - c.EvaluationScopeSelf = self - c.EvaluationScopeKeyData = keyData - return c.EvaluationScopeScope -} - -func (c *MockEvalContext) WithPath(path addrs.ModuleInstance) EvalContext { - newC := *c - newC.PathPath = path - return &newC -} - -func (c *MockEvalContext) Path() addrs.ModuleInstance { - c.PathCalled = true - return c.PathPath -} - -func (c *MockEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, values map[string]cty.Value) { - c.SetModuleCallArgumentsCalled = true - c.SetModuleCallArgumentsModule = n - c.SetModuleCallArgumentsValues = values -} - -func (c *MockEvalContext) GetVariableValue(addr addrs.AbsInputVariableInstance) cty.Value { - c.GetVariableValueCalled = true - c.GetVariableValueAddr = addr - return c.GetVariableValueValue -} - -func (c *MockEvalContext) Changes() *plans.ChangesSync { - c.ChangesCalled = true - return c.ChangesChanges -} - -func (c *MockEvalContext) State() *states.SyncState { - c.StateCalled = true - return c.StateState -} - -func (c *MockEvalContext) InstanceExpander() *instances.Expander { - c.InstanceExpanderCalled = true - return c.InstanceExpanderExpander -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go deleted file mode 100644 index b09c72e6..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go +++ /dev/null @@ -1,124 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" -) - -// evaluateCountExpression is our standard mechanism for interpreting an -// expression given for a "count" argument on a resource or a module. This -// should be called during expansion in order to determine the final count -// value. -// -// evaluateCountExpression differs from evaluateCountExpressionValue by -// returning an error if the count value is not known, and converting the -// cty.Value to an integer. -func evaluateCountExpression(expr hcl.Expression, ctx EvalContext) (int, tfdiags.Diagnostics) { - countVal, diags := evaluateCountExpressionValue(expr, ctx) - if !countVal.IsKnown() { - // Currently this is a rather bad outcome from a UX standpoint, since we have - // no real mechanism to deal with this situation and all we can do is produce - // an error message. - // FIXME: In future, implement a built-in mechanism for deferring changes that - // can't yet be predicted, and use it to guide the user through several - // plan/apply steps until the desired configuration is eventually reached. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. 
To work around this, use the -target argument to first apply only the resources that the count depends on.`, - Subject: expr.Range().Ptr(), - }) - } - - if countVal.IsNull() || !countVal.IsKnown() { - return -1, diags - } - - count, _ := countVal.AsBigFloat().Int64() - return int(count), diags -} - -// evaluateCountExpressionValue is like evaluateCountExpression -// except that it returns a cty.Value which must be a cty.Number and can be -// unknown. -func evaluateCountExpressionValue(expr hcl.Expression, ctx EvalContext) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - nullCount := cty.NullVal(cty.Number) - if expr == nil { - return nullCount, nil - } - - countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) - diags = diags.Append(countDiags) - if diags.HasErrors() { - return nullCount, diags - } - - switch { - case countVal.IsNull(): - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is null. An integer is required.`, - Subject: expr.Range().Ptr(), - }) - return nullCount, diags - - case !countVal.IsKnown(): - return cty.UnknownVal(cty.Number), diags - } - - var count int - err := gocty.FromCtyValue(countVal, &count) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), - Subject: expr.Range().Ptr(), - }) - return nullCount, diags - } - if count < 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is unsuitable: negative numbers are not supported.`, - Subject: expr.Range().Ptr(), - }) - return nullCount, diags - } - - return countVal, diags -} - -// fixResourceCountSetTransition is a helper function to fix up the state when a -// resource transitions its "count" from being set to unset or vice-versa, -// treating a 0-key and a no-key instance as aliases for one another across -// the transition. -// -// The correct time to call this function is in the DynamicExpand method for -// a node representing a resource, just after evaluating the count with -// evaluateCountExpression, and before any other analysis of the -// state such as orphan detection. -// -// This function calls methods on the given EvalContext to update the current -// state in-place, if necessary. It is a no-op if there is no count transition -// taking place. -// -// Since the state is modified in-place, this function must take a writer lock -// on the state. The caller must therefore not also be holding a state lock, -// or this function will block forever awaiting the lock. 
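Concretely, the aliasing the comment above describes, with hypothetical addresses:

// Removing `count` from a resource renames the state entry
//
//	aws_instance.foo[0]  ->  aws_instance.foo
//
// and adding `count` renames it back
//
//	aws_instance.foo     ->  aws_instance.foo[0]
//
// so the existing remote object is preserved across the transition
// instead of being treated as an orphan and destroyed.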
-func fixResourceCountSetTransition(ctx EvalContext, addr addrs.ConfigResource, countEnabled bool) { - state := ctx.State() - changed := state.MaybeFixUpResourceInstanceAddressForCount(addr, countEnabled) - if changed { - log.Printf("[TRACE] renamed first %s instance in transient state due to count argument change", addr) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go deleted file mode 100644 index 855f1489..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go +++ /dev/null @@ -1,76 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" -) - -// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state -// when there is a resource count with zero/one boundary, i.e. fixing -// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. -// -// This works on the global state. -type EvalCountFixZeroOneBoundaryGlobal struct { - Config *configs.Config -} - -// TODO: test -func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) { - // We'll temporarily lock the state to grab the modules, then work on each - // one separately while taking a lock again for each separate resource. - // This means that if another caller concurrently adds a module here while - // we're working then we won't update it, but that's no worse than the - // concurrent writer blocking for our entire fixup process and _then_ - // adding a new module, and in practice the graph node associated with - // this eval depends on everything else in the graph anyway, so there - // should not be concurrent writers. - state := ctx.State().Lock() - moduleAddrs := make([]addrs.ModuleInstance, 0, len(state.Modules)) - for _, m := range state.Modules { - moduleAddrs = append(moduleAddrs, m.Addr) - } - ctx.State().Unlock() - - for _, addr := range moduleAddrs { - cfg := n.Config.DescendentForInstance(addr) - if cfg == nil { - log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr) - continue - } - if err := n.fixModule(ctx, addr); err != nil { - return nil, err - } - } - - return nil, nil -} - -func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(ctx EvalContext, moduleAddr addrs.ModuleInstance) error { - ms := ctx.State().Module(moduleAddr) - cfg := n.Config.DescendentForInstance(moduleAddr) - if ms == nil { - // Theoretically possible for a concurrent writer to delete a module - // while we're running, but in practice the graph node that called us - // depends on everything else in the graph and so there can never - // be a concurrent writer. 
-		return fmt.Errorf("no state found for %s while trying to fix up EachModes", moduleAddr)
-	}
-	if cfg == nil {
-		return fmt.Errorf("no config found for %s while trying to fix up EachModes", moduleAddr)
-	}
-
-	for _, r := range ms.Resources {
-		rCfg := cfg.Module.ResourceByAddr(r.Addr.Resource)
-		if rCfg == nil {
-			log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", r.Addr)
-			continue
-		}
-		hasCount := rCfg.Count != nil
-		fixResourceCountSetTransition(ctx, r.Addr.Config(), hasCount)
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
deleted file mode 100644
index 509bd5bb..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
+++ /dev/null
@@ -1,831 +0,0 @@
-package terraform
-
-import (
-	"fmt"
-	"log"
-	"strings"
-
-	"github.com/hashicorp/hcl/v2"
-	"github.com/zclconf/go-cty/cty"
-
-	"github.com/hashicorp/terraform/addrs"
-	"github.com/hashicorp/terraform/configs"
-	"github.com/hashicorp/terraform/plans"
-	"github.com/hashicorp/terraform/plans/objchange"
-	"github.com/hashicorp/terraform/providers"
-	"github.com/hashicorp/terraform/states"
-	"github.com/hashicorp/terraform/tfdiags"
-)
-
-// EvalCheckPlannedChange is an EvalNode implementation that produces errors
-// if the _actual_ change value is not compatible with what was recorded
-// in the plan.
-//
-// Errors here are most often indicative of a bug in the provider, so our
-// error messages will report with that in mind. It's also possible that
-// there's a bug in Terraform Core's own "proposed new value" code in
-// EvalDiff.
-type EvalCheckPlannedChange struct {
-	Addr           addrs.ResourceInstance
-	ProviderAddr   addrs.AbsProviderConfig
-	ProviderSchema **ProviderSchema
-
-	// We take ResourceInstanceChange objects here just because that's what's
-	// convenient to pass in from the evaltree implementation, but we really
-	// only look at the "After" value of each change.
-	Planned, Actual **plans.ResourceInstanceChange
-}
-
-func (n *EvalCheckPlannedChange) Eval(ctx EvalContext) (interface{}, error) {
-	providerSchema := *n.ProviderSchema
-	plannedChange := *n.Planned
-	actualChange := *n.Actual
-
-	schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
-	if schema == nil {
-		// Should be caught during validation, so we don't bother with a pretty error here
-		return nil, fmt.Errorf("provider does not support %q", n.Addr.Resource.Type)
-	}
-
-	var diags tfdiags.Diagnostics
-	absAddr := n.Addr.Absolute(ctx.Path())
-
-	log.Printf("[TRACE] EvalCheckPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action)
-
-	if plannedChange.Action != actualChange.Action {
-		switch {
-		case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp:
-			// It's okay for an update to become a NoOp once we've filled in
-			// all of the unknown values, since the final values might actually
-			// match what was there before after all.
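A concrete, hypothetical instance of the permitted Update-to-NoOp collapse:

// Plan time:  ami = "ami-123" -> (known after apply)  => action is Update
// Apply time: the upstream value resolves back to "ami-123", the final
// diff is empty, and the provider legitimately reports NoOp instead.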
- log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent final plan", - fmt.Sprintf( - "When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ProviderAddr.Provider.String(), - plannedChange.Action, actualChange.Action, - ), - )) - } - } - - errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After) - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent final plan", - fmt.Sprintf( - "When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ProviderAddr.Provider.String(), tfdiags.FormatError(err), - ), - )) - } - return nil, diags.Err() -} - -// EvalDiff is an EvalNode implementation that detects changes for a given -// resource instance. -type EvalDiff struct { - Addr addrs.ResourceInstance - Config *configs.Resource - Provider *providers.Interface - ProviderAddr addrs.AbsProviderConfig - ProviderMetas map[addrs.Provider]*configs.ProviderMeta - ProviderSchema **ProviderSchema - State **states.ResourceInstanceObject - PreviousDiff **plans.ResourceInstanceChange - - // CreateBeforeDestroy is set if either the resource's own config sets - // create_before_destroy explicitly or if dependencies have forced the - // resource to be handled as create_before_destroy in order to avoid - // a dependency cycle. 
- CreateBeforeDestroy bool - - OutputChange **plans.ResourceInstanceChange - OutputValue *cty.Value - OutputState **states.ResourceInstanceObject - - Stub bool -} - -// TODO: test -func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - config := *n.Config - provider := *n.Provider - providerSchema := *n.ProviderSchema - - if providerSchema == nil { - return nil, fmt.Errorf("provider schema is unavailable for %s", n.Addr) - } - if n.ProviderAddr.Provider.Type == "" { - panic(fmt.Sprintf("EvalDiff for %s does not have ProviderAddr set", n.Addr.Absolute(ctx.Path()))) - } - - var diags tfdiags.Diagnostics - - // Evaluate the configuration - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - forEach, _ := evaluateForEachExpression(n.Config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - - metaConfigVal := cty.NullVal(cty.DynamicPseudoType) - if n.ProviderMetas != nil { - if m, ok := n.ProviderMetas[n.ProviderAddr.Provider]; ok && m != nil { - // if the provider doesn't support this feature, throw an error - if (*n.ProviderSchema).ProviderMeta == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", n.ProviderAddr.Provider.String()), - Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr), - Subject: &m.ProviderRange, - }) - } else { - var configDiags tfdiags.Diagnostics - metaConfigVal, _, configDiags = ctx.EvaluateBlock(m.Config, (*n.ProviderSchema).ProviderMeta, nil, EvalDataForNoInstanceKey) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - } - } - } - - absAddr := n.Addr.Absolute(ctx.Path()) - var priorVal cty.Value - var priorValTainted cty.Value - var priorPrivate []byte - if state != nil { - if state.Status != states.ObjectTainted { - priorVal = state.Value - priorPrivate = state.Private - } else { - // If the prior state is tainted then we'll proceed below like - // we're creating an entirely new object, but then turn it into - // a synthetic "Replace" change at the end, creating the same - // result as if the provider had marked at least one argument - // change as "requires replacement". - priorValTainted = state.Value - priorVal = cty.NullVal(schema.ImpliedType()) - } - } else { - priorVal = cty.NullVal(schema.ImpliedType()) - } - - proposedNewVal := objchange.ProposedNewObject(schema, priorVal, configVal) - - // Call pre-diff hook - if !n.Stub { - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) - }) - if err != nil { - return nil, err - } - } - - log.Printf("[TRACE] Re-validating config for %q", n.Addr.Absolute(ctx.Path())) - // Allow the provider to validate the final set of values. - // The config was statically validated early on, but there may have been - // unknown values which the provider could not validate at the time. 
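The situation this re-validation covers, sketched with a hypothetical attribute:

// At `terraform validate` time, name's value may still be unknown:
//
//	name = random_pet.server.id   // (known after apply)
//
// so a provider-side rule (say, a length limit on name) could not run.
// By plan time the reference may have resolved to a concrete string,
// giving ValidateResourceTypeConfig below a second chance to check it.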
-	validateResp := provider.ValidateResourceTypeConfig(
-		providers.ValidateResourceTypeConfigRequest{
-			TypeName: n.Addr.Resource.Type,
-			Config:   configVal,
-		},
-	)
-	if validateResp.Diagnostics.HasErrors() {
-		return nil, validateResp.Diagnostics.InConfigBody(config.Config).Err()
-	}
-
-	// The provider gets an opportunity to customize the proposed new value,
-	// which in turn produces the _planned_ new value. But before
-	// we send back this information, we need to process ignore_changes
-	// so that CustomizeDiff will not act on them
-	var ignoreChangeDiags tfdiags.Diagnostics
-	proposedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, proposedNewVal)
-	diags = diags.Append(ignoreChangeDiags)
-	if ignoreChangeDiags.HasErrors() {
-		return nil, diags.Err()
-	}
-
-	resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{
-		TypeName:         n.Addr.Resource.Type,
-		Config:           configVal,
-		PriorState:       priorVal,
-		ProposedNewState: proposedNewVal,
-		PriorPrivate:     priorPrivate,
-		ProviderMeta:     metaConfigVal,
-	})
-	diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config))
-	if diags.HasErrors() {
-		return nil, diags.Err()
-	}
-
-	plannedNewVal := resp.PlannedState
-	plannedPrivate := resp.PlannedPrivate
-
-	if plannedNewVal == cty.NilVal {
-		// Should never happen. Since real-world providers return via RPC, a
-		// nil is always a bug in the client-side stub. This is more likely
-		// caused by an incompletely-configured mock provider in tests, though.
-		panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", absAddr.String()))
-	}
-
-	// We allow the planned new value to disagree with configuration _values_
-	// here, since that allows the provider to do special logic like a
-	// DiffSuppressFunc, but we still require that the provider produces
-	// a value whose type conforms to the schema.
-	for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) {
-		diags = diags.Append(tfdiags.Sourceless(
-			tfdiags.Error,
-			"Provider produced invalid plan",
-			fmt.Sprintf(
-				"Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
-				n.ProviderAddr.Provider.String(), tfdiags.FormatErrorPrefixed(err, absAddr.String()),
-			),
-		))
-	}
-	if diags.HasErrors() {
-		return nil, diags.Err()
-	}
-
-	if errs := objchange.AssertPlanValid(schema, priorVal, configVal, plannedNewVal); len(errs) > 0 {
-		if resp.LegacyTypeSystem {
-			// The shimming of the old type system in the legacy SDK is not precise
-			// enough to pass this consistency check, so we'll give it a pass here,
-			// but we will generate a warning about it so that we are more likely
-			// to notice in the logs if an inconsistency beyond the type system
-			// leads to a downstream provider failure.
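A hypothetical example of the kind of inconsistency AssertPlanValid catches:

// Config sets:        tags = { "env" = "prod" }
// PlannedState says:  tags = { "env" = "dev" }
//
// A provider may only change attributes it declares as computed, so this
// counts as an invalid plan: an error for modern providers, or merely the
// logged warning built below for legacy-SDK providers.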
-			var buf strings.Builder
-			fmt.Fprintf(&buf,
-				"[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:",
-				n.ProviderAddr.Provider.String(), absAddr,
-			)
-			for _, err := range errs {
-				fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err))
-			}
-			log.Print(buf.String())
-		} else {
-			for _, err := range errs {
-				diags = diags.Append(tfdiags.Sourceless(
-					tfdiags.Error,
-					"Provider produced invalid plan",
-					fmt.Sprintf(
-						"Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
-						n.ProviderAddr.Provider.String(), tfdiags.FormatErrorPrefixed(err, absAddr.String()),
-					),
-				))
-			}
-			return nil, diags.Err()
-		}
-	}
-
-	// TODO: We should be able to remove this repeat of processing ignored changes
-	// after the plan, which helps providers relying on old behavior "just work"
-	// in the next major version, such that we can be stricter about ignore_changes
-	// values
-	plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, plannedNewVal)
-	diags = diags.Append(ignoreChangeDiags)
-	if ignoreChangeDiags.HasErrors() {
-		return nil, diags.Err()
-	}
-
-	// The provider produces a list of paths to attributes whose changes mean
-	// that we must replace rather than update an existing remote object.
-	// However, we only need to do that if the identified attributes _have_
-	// actually changed -- particularly after we may have undone some of the
-	// changes in processIgnoreChanges -- so now we'll filter that list to
-	// include only where changes are detected.
-	reqRep := cty.NewPathSet()
-	if len(resp.RequiresReplace) > 0 {
-		for _, path := range resp.RequiresReplace {
-			if priorVal.IsNull() {
-				// If prior is null then we don't expect any RequiresReplace at all,
-				// because this is a Create action.
-				continue
-			}
-
-			priorChangedVal, priorPathDiags := hcl.ApplyPath(priorVal, path, nil)
-			plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil)
-			if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() {
-				// This means the path was invalid in both the prior and new
-				// values, which is an error with the provider itself.
-				diags = diags.Append(tfdiags.Sourceless(
-					tfdiags.Error,
-					"Provider produced invalid plan",
-					fmt.Sprintf(
-						"Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
-						n.ProviderAddr.Provider.String(), absAddr, path,
-					),
-				))
-				continue
-			}
-
-			// Make sure we have valid Values for both values.
-			// Note: if the opposing value was of the type
-			// cty.DynamicPseudoType, the type assigned here may not exactly
-			// match the schema. This is fine here, since we're only going to
-			// check for equality, but if the NullVal is to be used, we need to
-			// check the schema for the true type.
- switch { - case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal: - // this should never happen without ApplyPath errors above - panic("requires replace path returned 2 nil values") - case priorChangedVal == cty.NilVal: - priorChangedVal = cty.NullVal(plannedChangedVal.Type()) - case plannedChangedVal == cty.NilVal: - plannedChangedVal = cty.NullVal(priorChangedVal.Type()) - } - - eqV := plannedChangedVal.Equals(priorChangedVal) - if !eqV.IsKnown() || eqV.False() { - reqRep.Add(path) - } - } - if diags.HasErrors() { - return nil, diags.Err() - } - } - - eqV := plannedNewVal.Equals(priorVal) - eq := eqV.IsKnown() && eqV.True() - - var action plans.Action - switch { - case priorVal.IsNull(): - action = plans.Create - case eq: - action = plans.NoOp - case !reqRep.Empty(): - // If there are any "requires replace" paths left _after our filtering - // above_ then this is a replace action. - if n.CreateBeforeDestroy { - action = plans.CreateThenDelete - } else { - action = plans.DeleteThenCreate - } - default: - action = plans.Update - // "Delete" is never chosen here, because deletion plans are always - // created more directly elsewhere, such as in "orphan" handling. - } - - if action.IsReplace() { - // In this strange situation we want to produce a change object that - // shows our real prior object but has a _new_ object that is built - // from a null prior object, since we're going to delete the one - // that has all the computed values on it. - // - // Therefore we'll ask the provider to plan again here, giving it - // a null object for the prior, and then we'll meld that with the - // _actual_ prior state to produce a correctly-shaped replace change. - // The resulting change should show any computed attributes changing - // from known prior values to unknown values, unless the provider is - // able to predict new values for any of these computed attributes. - nullPriorVal := cty.NullVal(schema.ImpliedType()) - - // create a new proposed value from the null state and the config - proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, configVal) - - resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - PriorState: nullPriorVal, - ProposedNewState: proposedNewVal, - PriorPrivate: plannedPrivate, - ProviderMeta: metaConfigVal, - }) - // We need to tread carefully here, since if there are any warnings - // in here they probably also came out of our previous call to - // PlanResourceChange above, and so we don't want to repeat them. - // Consequently, we break from the usual pattern here and only - // append these new diagnostics if there's at least one error inside. 
- if resp.Diagnostics.HasErrors() { - diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) - return nil, diags.Err() - } - plannedNewVal = resp.PlannedState - plannedPrivate = resp.PlannedPrivate - for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.Provider.String(), absAddr, tfdiags.FormatError(err), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - } - - // If our prior value was tainted then we actually want this to appear - // as a replace change, even though so far we've been treating it as a - // create. - if action == plans.Create && priorValTainted != cty.NilVal { - if n.CreateBeforeDestroy { - action = plans.CreateThenDelete - } else { - action = plans.DeleteThenCreate - } - priorVal = priorValTainted - } - - // As a special case, if we have a previous diff (presumably from the plan - // phases, whereas we're now in the apply phase) and it was for a replace, - // we've already deleted the original object from state by the time we - // get here and so we would've ended up with a _create_ action this time, - // which we now need to paper over to get a result consistent with what - // we originally intended. - if n.PreviousDiff != nil { - prevChange := *n.PreviousDiff - if prevChange.Action.IsReplace() && action == plans.Create { - log.Printf("[TRACE] EvalDiff: %s treating Create change as %s change to match with earlier plan", absAddr, prevChange.Action) - action = prevChange.Action - priorVal = prevChange.Before - } - } - - // Call post-refresh hook - if !n.Stub { - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(absAddr, states.CurrentGen, action, priorVal, plannedNewVal) - }) - if err != nil { - return nil, err - } - } - - // Update our output if we care - if n.OutputChange != nil { - *n.OutputChange = &plans.ResourceInstanceChange{ - Addr: absAddr, - Private: plannedPrivate, - ProviderAddr: n.ProviderAddr, - Change: plans.Change{ - Action: action, - Before: priorVal, - After: plannedNewVal, - }, - RequiredReplace: reqRep, - } - } - - if n.OutputValue != nil { - *n.OutputValue = configVal - } - - // Update the state if we care - if n.OutputState != nil { - *n.OutputState = &states.ResourceInstanceObject{ - // We use the special "planned" status here to note that this - // object's value is not yet complete. Objects with this status - // cannot be used during expression evaluation, so the caller - // must _also_ record the returned change in the active plan, - // which the expression evaluator will use in preference to this - // incomplete value recorded in the state. - Status: states.ObjectPlanned, - Value: plannedNewVal, - Private: plannedPrivate, - } - } - - return nil, nil -} - -func (n *EvalDiff) processIgnoreChanges(prior, proposed cty.Value) (cty.Value, tfdiags.Diagnostics) { - // ignore_changes only applies when an object already exists, since we - // can't ignore changes to a thing we've not created yet. 
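For reference, the configuration shape that drives this path, as hypothetical HCL inside a resource block:

//	resource "aws_instance" "example" {
//	  # ...
//	  lifecycle {
//	    ignore_changes = [tags["ManagedBy"]]
//	  }
//	}
//
// During planning, the proposed value at each listed path is replaced by
// the prior value, so drift in ManagedBy never surfaces as an Update.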
- if prior.IsNull() { - return proposed, nil - } - - ignoreChanges := n.Config.Managed.IgnoreChanges - ignoreAll := n.Config.Managed.IgnoreAllChanges - - if len(ignoreChanges) == 0 && !ignoreAll { - return proposed, nil - } - if ignoreAll { - return prior, nil - } - if prior.IsNull() || proposed.IsNull() { - // Ignore changes doesn't apply when we're creating for the first time. - // Proposed should never be null here, but if it is then we'll just let it be. - return proposed, nil - } - - return processIgnoreChangesIndividual(prior, proposed, ignoreChanges) -} - -func processIgnoreChangesIndividual(prior, proposed cty.Value, ignoreChanges []hcl.Traversal) (cty.Value, tfdiags.Diagnostics) { - // When we walk below we will be using cty.Path values for comparison, so - // we'll convert our traversals here so we can compare more easily. - ignoreChangesPath := make([]cty.Path, len(ignoreChanges)) - for i, traversal := range ignoreChanges { - path := make(cty.Path, len(traversal)) - for si, step := range traversal { - switch ts := step.(type) { - case hcl.TraverseRoot: - path[si] = cty.GetAttrStep{ - Name: ts.Name, - } - case hcl.TraverseAttr: - path[si] = cty.GetAttrStep{ - Name: ts.Name, - } - case hcl.TraverseIndex: - path[si] = cty.IndexStep{ - Key: ts.Key, - } - default: - panic(fmt.Sprintf("unsupported traversal step %#v", step)) - } - } - ignoreChangesPath[i] = path - } - - var diags tfdiags.Diagnostics - ret, _ := cty.Transform(proposed, func(path cty.Path, v cty.Value) (cty.Value, error) { - // First we must see if this is a path that's being ignored at all. - // We're looking for an exact match here because this walk will visit - // leaf values first and then their containers, and we want to do - // the "ignore" transform once we reach the point indicated, throwing - // away any deeper values we already produced at that point. - var ignoreTraversal hcl.Traversal - for i, candidate := range ignoreChangesPath { - if path.Equals(candidate) { - ignoreTraversal = ignoreChanges[i] - } - } - if ignoreTraversal == nil { - return v, nil - } - - // If we're able to follow the same path through the prior value, - // we'll take the value there instead, effectively undoing the - // change that was planned. - priorV, diags := hcl.ApplyPath(prior, path, nil) - if diags.HasErrors() { - // We just ignore the errors and move on here, since we assume it's - // just because the prior value was a slightly-different shape. - // It could potentially also be that the traversal doesn't match - // the schema, but we should've caught that during the validate - // walk if so. - return v, nil - } - return priorV, nil - }) - return ret, diags -} - -// a group of key-*ResourceAttrDiff pairs from the same flatmapped container -type flatAttrDiff map[string]*ResourceAttrDiff - -// we need to keep all keys if any of them have a diff that's not ignored -func (f flatAttrDiff) keepDiff(ignoreChanges map[string]bool) bool { - for k, v := range f { - ignore := false - for attr := range ignoreChanges { - if strings.HasPrefix(k, attr) { - ignore = true - } - } - - if !v.Empty() && !v.NewComputed && !ignore { - return true - } - } - return false -} - -// EvalDiffDestroy is an EvalNode implementation that returns a plain -// destroy diff. 
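The change constructed below always has the same shape; no provider round-trip is involved:

//	plans.Change{
//		Action: plans.Delete,
//		Before: state.Value, // the current object
//		After:  cty.NullVal(cty.DynamicPseudoType),
//	}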
-type EvalDiffDestroy struct {
-	Addr         addrs.ResourceInstance
-	DeposedKey   states.DeposedKey
-	State        **states.ResourceInstanceObject
-	ProviderAddr addrs.AbsProviderConfig
-
-	Output      **plans.ResourceInstanceChange
-	OutputState **states.ResourceInstanceObject
-}
-
-// TODO: test
-func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
-	absAddr := n.Addr.Absolute(ctx.Path())
-	state := *n.State
-
-	if n.ProviderAddr.Provider.Type == "" {
-		if n.DeposedKey == "" {
-			panic(fmt.Sprintf("EvalDiffDestroy for %s does not have ProviderAddr set", absAddr))
-		} else {
-			panic(fmt.Sprintf("EvalDiffDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, n.DeposedKey))
-		}
-	}
-
-	// If there is no state or our attributes object is null then we're already
-	// destroyed.
-	if state == nil || state.Value.IsNull() {
-		return nil, nil
-	}
-
-	// Call pre-diff hook
-	err := ctx.Hook(func(h Hook) (HookAction, error) {
-		return h.PreDiff(
-			absAddr, n.DeposedKey.Generation(),
-			state.Value,
-			cty.NullVal(cty.DynamicPseudoType),
-		)
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	// Change is always the same for a destroy. We don't need the provider's
-	// help for this one.
-	// TODO: Should we give the provider an opportunity to veto this?
-	change := &plans.ResourceInstanceChange{
-		Addr:       absAddr,
-		DeposedKey: n.DeposedKey,
-		Change: plans.Change{
-			Action: plans.Delete,
-			Before: state.Value,
-			After:  cty.NullVal(cty.DynamicPseudoType),
-		},
-		Private:      state.Private,
-		ProviderAddr: n.ProviderAddr,
-	}
-
-	// Call post-diff hook
-	err = ctx.Hook(func(h Hook) (HookAction, error) {
-		return h.PostDiff(
-			absAddr,
-			n.DeposedKey.Generation(),
-			change.Action,
-			change.Before,
-			change.After,
-		)
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	// Update our output
-	*n.Output = change
-
-	if n.OutputState != nil {
-		// Record our proposed new state, which is nil because we're destroying.
-		*n.OutputState = nil
-	}
-
-	return nil, nil
-}
-
-// EvalReduceDiff is an EvalNode implementation that takes a planned resource
-// instance change as might be produced by EvalDiff or EvalDiffDestroy and
-// "simplifies" it to a single atomic action to be performed by a specific
-// graph node.
-//
-// Callers must specify whether they are a destroy node or a regular apply
-// node. If the result is NoOp then the given change requires no action for
-// the specific graph node calling this and so evaluation of that graph
-// node should exit early and take no action.
-//
-// The object written to OutChange may either be identical to InChange or
-// a new change object derived from InChange. Because of the former case, the
-// caller must not mutate the object returned in OutChange.
-type EvalReduceDiff struct {
-	Addr      addrs.ResourceInstance
-	InChange  **plans.ResourceInstanceChange
-	Destroy   bool
-	OutChange **plans.ResourceInstanceChange
-}
-
-// TODO: test
-func (n *EvalReduceDiff) Eval(ctx EvalContext) (interface{}, error) {
-	in := *n.InChange
-	out := in.Simplify(n.Destroy)
-	if n.OutChange != nil {
-		*n.OutChange = out
-	}
-	if out.Action != in.Action {
-		if n.Destroy {
-			log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for destroy node", n.Addr, in.Action, out.Action)
-		} else {
-			log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for apply node", n.Addr, in.Action, out.Action)
-		}
-	}
-	return nil, nil
-}
-
-// EvalReadDiff is an EvalNode implementation that retrieves the planned
-// change for a particular resource instance object.
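Context for the decode step that follows: changes live in the plan in a serialized form, so EvalReadDiff decodes them against the resource type's implied schema type before handing them back. Roughly:

//	csrc := changes.GetResourceInstanceChange(addr, gen) // serialized form
//	change, err := csrc.Decode(schema.ImpliedType())     // concrete cty values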
-type EvalReadDiff struct { - Addr addrs.ResourceInstance - DeposedKey states.DeposedKey - ProviderSchema **ProviderSchema - Change **plans.ResourceInstanceChange -} - -func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) { - providerSchema := *n.ProviderSchema - changes := ctx.Changes() - addr := n.Addr.Absolute(ctx.Path()) - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - gen := states.CurrentGen - if n.DeposedKey != states.NotDeposed { - gen = n.DeposedKey - } - csrc := changes.GetResourceInstanceChange(addr, gen) - if csrc == nil { - log.Printf("[TRACE] EvalReadDiff: No planned change recorded for %s", addr) - return nil, nil - } - - change, err := csrc.Decode(schema.ImpliedType()) - if err != nil { - return nil, fmt.Errorf("failed to decode planned changes for %s: %s", addr, err) - } - if n.Change != nil { - *n.Change = change - } - - log.Printf("[TRACE] EvalReadDiff: Read %s change from plan for %s", change.Action, addr) - - return nil, nil -} - -// EvalWriteDiff is an EvalNode implementation that saves a planned change -// for an instance object into the set of global planned changes. -type EvalWriteDiff struct { - Addr addrs.ResourceInstance - DeposedKey states.DeposedKey - ProviderSchema **ProviderSchema - Change **plans.ResourceInstanceChange -} - -// TODO: test -func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) { - changes := ctx.Changes() - addr := n.Addr.Absolute(ctx.Path()) - if n.Change == nil || *n.Change == nil { - // Caller sets nil to indicate that we need to remove a change from - // the set of changes. - gen := states.CurrentGen - if n.DeposedKey != states.NotDeposed { - gen = n.DeposedKey - } - changes.RemoveResourceInstanceChange(addr, gen) - return nil, nil - } - - providerSchema := *n.ProviderSchema - change := *n.Change - - if change.Addr.String() != addr.String() || change.DeposedKey != n.DeposedKey { - // Should never happen, and indicates a bug in the caller. - panic("inconsistent address and/or deposed key in EvalWriteDiff") - } - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - csrc, err := change.Encode(schema.ImpliedType()) - if err != nil { - return nil, fmt.Errorf("failed to encode planned changes for %s: %s", addr, err) - } - - changes.AppendResourceInstanceChange(csrc) - if n.DeposedKey == states.NotDeposed { - log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s", change.Action, addr) - } else { - log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s deposed object %s", change.Action, addr, n.DeposedKey) - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_error.go b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go deleted file mode 100644 index 470f798b..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_error.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -// EvalReturnError is an EvalNode implementation that returns an -// error if it is present. 
-//
-// This is useful for scenarios where an error has been captured by
-// another EvalNode (like EvalApply) for special EvalTree-based error
-// handling, and that handling has completed, so the error should be
-// returned normally.
-type EvalReturnError struct {
-	Error *error
-}
-
-func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) {
-	if n.Error == nil {
-		return nil, nil
-	}
-
-	return nil, *n.Error
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
deleted file mode 100644
index 711c625c..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package terraform
-
-// EvalNodeFilterFunc is the callback used to replace a node with
-// another node. To not do the replacement, just return the input node.
-type EvalNodeFilterFunc func(EvalNode) EvalNode
-
-// EvalNodeFilterable is an interface that can be implemented by
-// EvalNodes to allow filtering of sub-elements. Note that this isn't
-// a common thing to implement and you probably don't need it.
-type EvalNodeFilterable interface {
-	EvalNode
-	Filter(EvalNodeFilterFunc)
-}
-
-// EvalFilter runs the filter on the given node and returns the
-// final filtered value. This should be called rather than checking
-// the EvalNode directly since this will properly handle EvalNodeFilterables.
-func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode {
-	if f, ok := node.(EvalNodeFilterable); ok {
-		f.Filter(fn)
-		return node
-	}
-
-	return fn(node)
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
deleted file mode 100644
index 1a55f024..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package terraform
-
-// EvalNodeOpFilterable is an interface that EvalNodes can implement
-// to be filterable by the operation that is being run on Terraform.
-type EvalNodeOpFilterable interface {
-	IncludeInOp(walkOperation) bool
-}
-
-// EvalNodeFilterOp returns a filter function that filters nodes that
-// include themselves in specific operations.
-func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc {
-	return func(n EvalNode) EvalNode {
-		include := true
-		if of, ok := n.(EvalNodeOpFilterable); ok {
-			include = of.IncludeInOp(op)
-		}
-		if include {
-			return n
-		}
-
-		return EvalNoop{}
-	}
-}
-
-// EvalOpFilter is an EvalNode implementation that is a proxy to
-// another node but filters based on the operation.
-type EvalOpFilter struct {
-	// Ops is the list of operations to include this node in.
-	Ops []walkOperation
-
-	// Node is the node to execute
-	Node EvalNode
-}
-
-// TODO: test
-func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) {
-	return EvalRaw(n.Node, ctx)
-}
-
-// EvalNodeOpFilterable impl.
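Hypothetical usage of the filtering machinery defined above:

//	node := &EvalOpFilter{
//		Ops:  []walkOperation{walkPlan, walkApply},
//		Node: inner,
//	}
//
// Applying EvalNodeFilterOp(walkValidate) to node yields EvalNoop{},
// while EvalNodeFilterOp(walkPlan) returns node unchanged.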
-func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool {
-	for _, v := range n.Ops {
-		if v == op {
-			return true
-		}
-	}
-
-	return false
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_for_each.go b/vendor/github.com/hashicorp/terraform/terraform/eval_for_each.go
deleted file mode 100644
index c23e9a3f..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_for_each.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package terraform
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/hcl/v2"
-	"github.com/hashicorp/terraform/tfdiags"
-	"github.com/zclconf/go-cty/cty"
-)
-
-// evaluateForEachExpression is our standard mechanism for interpreting an
-// expression given for a "for_each" argument on a resource or a module. This
-// should be called during expansion in order to determine the final keys and
-// values.
-//
-// evaluateForEachExpression differs from evaluateForEachExpressionValue by
-// returning an error if the for_each value is not known, and converting the
-// cty.Value to a map[string]cty.Value for compatibility with other calls.
-func evaluateForEachExpression(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, diags tfdiags.Diagnostics) {
-	forEachVal, diags := evaluateForEachExpressionValue(expr, ctx)
-	if !forEachVal.IsKnown() {
-		// Attach a diag as we do with count, with the same downsides
-		diags = diags.Append(&hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  "Invalid for_each argument",
-			Detail:   `The "for_each" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the for_each depends on.`,
-			Subject:  expr.Range().Ptr(),
-		})
-	}
-
-	if forEachVal.IsNull() || !forEachVal.IsKnown() || forEachVal.LengthInt() == 0 {
-		// we check length, because an empty set returns a nil map
-		return map[string]cty.Value{}, diags
-	}
-
-	return forEachVal.AsValueMap(), diags
-}
-
-// evaluateForEachExpressionValue is like evaluateForEachExpression
-// except that it returns a cty.Value map or set which can be unknown.
-func evaluateForEachExpressionValue(expr hcl.Expression, ctx EvalContext) (cty.Value, tfdiags.Diagnostics) {
-	var diags tfdiags.Diagnostics
-	nullMap := cty.NullVal(cty.Map(cty.DynamicPseudoType))
-
-	if expr == nil {
-		return nullMap, diags
-	}
-
-	forEachVal, forEachDiags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil)
-	diags = diags.Append(forEachDiags)
-	if diags.HasErrors() {
-		return nullMap, diags
-	}
-	ty := forEachVal.Type()
-
-	switch {
-	case forEachVal.IsNull():
-		diags = diags.Append(&hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  "Invalid for_each argument",
-			Detail:   `The given "for_each" argument value is unsuitable: the value is null. A map or set of strings is allowed.`,
A map, or set of strings is allowed.`,
-			Subject:  expr.Range().Ptr(),
-		})
-		return nullMap, diags
-	case !forEachVal.IsKnown():
-		// ensure that we have a map, and not a DynamicValue
-		return cty.UnknownVal(cty.Map(cty.DynamicPseudoType)), diags
-
-	case !(ty.IsMapType() || ty.IsSetType() || ty.IsObjectType()):
-		diags = diags.Append(&hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  "Invalid for_each argument",
-			Detail:   fmt.Sprintf(`The given "for_each" argument value is unsuitable: the "for_each" argument must be a map, or set of strings, and you have provided a value of type %s.`, ty.FriendlyName()),
-			Subject:  expr.Range().Ptr(),
-		})
-		return nullMap, diags
-
-	case forEachVal.LengthInt() == 0:
-		// If the map is empty ({}), return an empty map, because cty will
-		// return nil when representing {} AsValueMap. This also covers an empty
-		// set (toset([]))
-		return forEachVal, diags
-	}
-
-	if ty.IsSetType() {
-		if ty.ElementType() != cty.String {
-			diags = diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Invalid for_each set argument",
-				Detail:   fmt.Sprintf(`The given "for_each" argument value is unsuitable: "for_each" supports maps and sets of strings, but you have provided a set containing type %s.`, forEachVal.Type().ElementType().FriendlyName()),
-				Subject:  expr.Range().Ptr(),
-			})
-			return cty.NullVal(ty), diags
-		}
-
-		// since we can't use set values that are unknown, we treat the
-		// entire set as unknown
-		if !forEachVal.IsWhollyKnown() {
-			return cty.UnknownVal(ty), diags
-		}
-
-		// A set of strings may contain null, which makes it impossible to
-		// convert to a map, so we must return an error
-		it := forEachVal.ElementIterator()
-		for it.Next() {
-			item, _ := it.Element()
-			if item.IsNull() {
-				diags = diags.Append(&hcl.Diagnostic{
-					Severity: hcl.DiagError,
-					Summary:  "Invalid for_each set argument",
-					Detail:   `The given "for_each" argument value is unsuitable: "for_each" sets must not contain null values.`,
-					Subject:  expr.Range().Ptr(),
-				})
-				return cty.NullVal(ty), diags
-			}
-		}
-	}
-
-	return forEachVal, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_if.go b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
deleted file mode 100644
index d6b46a1f..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package terraform
-
-// EvalIf is an EvalNode that is a conditional.
-type EvalIf struct {
-	If   func(EvalContext) (bool, error)
-	Then EvalNode
-	Else EvalNode
-}
-
-// TODO: test
-func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) {
-	yes, err := n.If(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	if yes {
-		return EvalRaw(n.Then, ctx)
-	} else {
-		if n.Else != nil {
-			return EvalRaw(n.Else, ctx)
-		}
-	}
-
-	return nil, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
deleted file mode 100644
index a60f4a0a..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package terraform
-
-import (
-	"fmt"
-	"log"
-
-	"github.com/hashicorp/terraform/addrs"
-	"github.com/hashicorp/terraform/providers"
-	"github.com/hashicorp/terraform/states"
-	"github.com/hashicorp/terraform/tfdiags"
-)
-
-// EvalImportState is an EvalNode implementation that performs an
-// ImportState operation on a provider. This will return the imported
-// states but won't modify any actual state.
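-//
-// A minimal usage sketch (editorial illustration, not from the vendored
-// source; addr, provider, and ctx are assumed to be in scope, and
-// "i-abc123" is a hypothetical import ID):
-//
-//	var imported []providers.ImportedResource
-//	node := &EvalImportState{
-//		Addr:     addr,
-//		Provider: &provider,
-//		ID:       "i-abc123",
-//		Output:   &imported,
-//	}
-//	_, err := EvalRaw(node, ctx)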
-type EvalImportState struct { - Addr addrs.ResourceInstance - Provider *providers.Interface - ID string - Output *[]providers.ImportedResource -} - -// TODO: test -func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - provider := *n.Provider - var diags tfdiags.Diagnostics - - { - // Call pre-import hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreImportState(absAddr, n.ID) - }) - if err != nil { - return nil, err - } - } - - resp := provider.ImportResourceState(providers.ImportResourceStateRequest{ - TypeName: n.Addr.Resource.Type, - ID: n.ID, - }) - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.Err() - } - - imported := resp.ImportedResources - - for _, obj := range imported { - log.Printf("[TRACE] EvalImportState: import %s %q produced instance object of type %s", absAddr.String(), n.ID, obj.TypeName) - } - - if n.Output != nil { - *n.Output = imported - } - - { - // Call post-import hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostImportState(absAddr, imported) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// EvalImportStateVerify verifies the state after ImportState and -// after the refresh to make sure it is non-nil and valid. -type EvalImportStateVerify struct { - Addr addrs.ResourceInstance - State **states.ResourceInstanceObject -} - -// TODO: test -func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - state := *n.State - if state.Value.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Cannot import non-existent remote object", - fmt.Sprintf( - "While attempting to import an existing object to %s, the provider detected that no object exists with the given id. Only pre-existing objects can be imported; check that the id is correct and that it is associated with the provider's configured region or endpoint, or use \"terraform apply\" to create a new remote object for this resource.", - n.Addr.String(), - ), - )) - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go b/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go deleted file mode 100644 index d3a4f5b4..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go +++ /dev/null @@ -1,61 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// EvalConfigBlock is an EvalNode implementation that takes a raw -// configuration block and evaluates any expressions within it. -// -// ExpandedConfig is populated with the result of expanding any "dynamic" -// blocks in the given body, which can be useful for extracting correct source -// location information for specific attributes in the result. 
-type EvalConfigBlock struct { - Config *hcl.Body - Schema *configschema.Block - SelfAddr addrs.Referenceable - Output *cty.Value - ExpandedConfig *hcl.Body - ContinueOnErr bool -} - -func (n *EvalConfigBlock) Eval(ctx EvalContext) (interface{}, error) { - val, body, diags := ctx.EvaluateBlock(*n.Config, n.Schema, n.SelfAddr, EvalDataForNoInstanceKey) - if diags.HasErrors() && n.ContinueOnErr { - log.Printf("[WARN] Block evaluation failed: %s", diags.Err()) - return nil, EvalEarlyExitError{} - } - - if n.Output != nil { - *n.Output = val - } - if n.ExpandedConfig != nil { - *n.ExpandedConfig = body - } - - return nil, diags.ErrWithWarnings() -} - -// EvalConfigExpr is an EvalNode implementation that takes a raw configuration -// expression and evaluates it. -type EvalConfigExpr struct { - Expr hcl.Expression - SelfAddr addrs.Referenceable - Output *cty.Value -} - -func (n *EvalConfigExpr) Eval(ctx EvalContext) (interface{}, error) { - val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, n.SelfAddr) - - if n.Output != nil { - *n.Output = val - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_local.go b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go deleted file mode 100644 index f30286e2..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_local.go +++ /dev/null @@ -1,74 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/lang" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalLocal is an EvalNode implementation that evaluates the -// expression for a local value and writes it into a transient part of -// the state. -type EvalLocal struct { - Addr addrs.LocalValue - Expr hcl.Expression -} - -func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - // We ignore diags here because any problems we might find will be found - // again in EvaluateExpr below. - refs, _ := lang.ReferencesInExpr(n.Expr) - for _, ref := range refs { - if ref.Subject == n.Addr { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Self-referencing local value", - Detail: fmt.Sprintf("Local value %s cannot use its own result as part of its expression.", n.Addr), - Subject: ref.SourceRange.ToHCL().Ptr(), - Context: n.Expr.Range().Ptr(), - }) - } - } - if diags.HasErrors() { - return nil, diags.Err() - } - - val, moreDiags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - return nil, diags.Err() - } - - state := ctx.State() - if state == nil { - return nil, fmt.Errorf("cannot write local value to nil state") - } - - state.SetLocalValue(n.Addr.Absolute(ctx.Path()), val) - - return nil, nil -} - -// EvalDeleteLocal is an EvalNode implementation that deletes a Local value -// from the state. Locals aren't persisted, but we don't need to evaluate them -// during destroy. 
-type EvalDeleteLocal struct { - Addr addrs.LocalValue -} - -func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) { - state := ctx.State() - if state == nil { - return nil, nil - } - - state.RemoveLocalValue(n.Addr.Absolute(ctx.Path())) - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go deleted file mode 100644 index f4bc8225..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go +++ /dev/null @@ -1,8 +0,0 @@ -package terraform - -// EvalNoop is an EvalNode that does nothing. -type EvalNoop struct{} - -func (EvalNoop) Eval(EvalContext) (interface{}, error) { - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go deleted file mode 100644 index f1a195f6..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go +++ /dev/null @@ -1,135 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/states" -) - -// EvalDeleteOutput is an EvalNode implementation that deletes an output -// from the state. -type EvalDeleteOutput struct { - Addr addrs.AbsOutputValue -} - -// TODO: test -func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) { - state := ctx.State() - if state == nil { - return nil, nil - } - - state.RemoveOutputValue(n.Addr) - return nil, nil -} - -// EvalWriteOutput is an EvalNode implementation that writes the output -// for the given name to the current state. -type EvalWriteOutput struct { - Addr addrs.OutputValue - Sensitive bool - Expr hcl.Expression - // ContinueOnErr allows interpolation to fail during Input - ContinueOnErr bool -} - -// TODO: test -func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { - addr := n.Addr.Absolute(ctx.Path()) - - // This has to run before we have a state lock, since evaluation also - // reads the state - val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) - // We'll handle errors below, after we have loaded the module. - - state := ctx.State() - if state == nil { - return nil, nil - } - - changes := ctx.Changes() // may be nil, if we're not working on a changeset - - // handling the interpolation error - if diags.HasErrors() { - if n.ContinueOnErr || flagWarnOutputErrors { - log.Printf("[ERROR] Output interpolation %q failed: %s", n.Addr.Name, diags.Err()) - // if we're continuing, make sure the output is included, and - // marked as unknown. If the evaluator was able to find a type - // for the value in spite of the error then we'll use it. - n.setValue(addr, state, changes, cty.UnknownVal(val.Type())) - return nil, EvalEarlyExitError{} - } - return nil, diags.Err() - } - - n.setValue(addr, state, changes, val) - - return nil, nil -} - -func (n *EvalWriteOutput) setValue(addr addrs.AbsOutputValue, state *states.SyncState, changes *plans.ChangesSync, val cty.Value) { - if val.IsKnown() && !val.IsNull() { - // The state itself doesn't represent unknown values, so we null them - // out here and then we'll save the real unknown value in the planned - // changeset below, if we have one on this graph walk. 
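-		// Editorial note (not from the vendored source): cty.UnknownAsNull
-		// walks the value and replaces every unknown leaf with a null of the
-		// same type, for example:
-		//
-		//	cty.UnknownAsNull(cty.UnknownVal(cty.String)) // => cty.NullVal(cty.String)
-		//
-		// so no unknown value ever reaches the state.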
- log.Printf("[TRACE] EvalWriteOutput: Saving value for %s in state", addr) - stateVal := cty.UnknownAsNull(val) - state.SetOutputValue(addr, stateVal, n.Sensitive) - } else { - log.Printf("[TRACE] EvalWriteOutput: Removing %s from state (it is now null)", addr) - state.RemoveOutputValue(addr) - } - - // If we also have an active changeset then we'll replicate the value in - // there. This is used in preference to the state where present, since it - // *is* able to represent unknowns, while the state cannot. - if changes != nil { - // For the moment we are not properly tracking changes to output - // values, and just marking them always as "Create" or "Destroy" - // actions. A future release will rework the output lifecycle so we - // can track their changes properly, in a similar way to how we work - // with resource instances. - - var change *plans.OutputChange - if !val.IsNull() { - change = &plans.OutputChange{ - Addr: addr, - Sensitive: n.Sensitive, - Change: plans.Change{ - Action: plans.Create, - Before: cty.NullVal(cty.DynamicPseudoType), - After: val, - }, - } - } else { - change = &plans.OutputChange{ - Addr: addr, - Sensitive: n.Sensitive, - Change: plans.Change{ - // This is just a weird placeholder delete action since - // we don't have an actual prior value to indicate. - // FIXME: Generate real planned changes for output values - // that include the old values. - Action: plans.Delete, - Before: cty.NullVal(cty.DynamicPseudoType), - After: cty.NullVal(cty.DynamicPseudoType), - }, - } - } - - cs, err := change.Encode() - if err != nil { - // Should never happen, since we just constructed this right above - panic(fmt.Sprintf("planned change for %s could not be encoded: %s", addr, err)) - } - log.Printf("[TRACE] EvalWriteOutput: Saving %s change for %s in changeset", change.Action, addr) - changes.RemoveOutputChange(addr) // remove any existing planned change, if present - changes.AppendOutputChange(cs) // add the new planned change - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go deleted file mode 100644 index 3b802d4e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go +++ /dev/null @@ -1,146 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/tfdiags" -) - -func buildProviderConfig(ctx EvalContext, addr addrs.AbsProviderConfig, config *configs.Provider) hcl.Body { - var configBody hcl.Body - if config != nil { - configBody = config.Config - } - - var inputBody hcl.Body - inputConfig := ctx.ProviderInput(addr) - if len(inputConfig) > 0 { - inputBody = configs.SynthBody("", inputConfig) - } - - switch { - case configBody != nil && inputBody != nil: - log.Printf("[TRACE] buildProviderConfig for %s: merging explicit config and input", addr) - // Note that the inputBody is the _base_ here, because configs.MergeBodies - // expects the base have all of the required fields, while these are - // forced to be optional for the override. The input process should - // guarantee that we have a value for each of the required arguments and - // that in practice the sets of attributes in each body will be - // disjoint. 
- return configs.MergeBodies(inputBody, configBody) - case configBody != nil: - log.Printf("[TRACE] buildProviderConfig for %s: using explicit config only", addr) - return configBody - case inputBody != nil: - log.Printf("[TRACE] buildProviderConfig for %s: using input only", addr) - return inputBody - default: - log.Printf("[TRACE] buildProviderConfig for %s: no configuration at all", addr) - return hcl.EmptyBody() - } -} - -// EvalConfigProvider is an EvalNode implementation that configures -// a provider that is already initialized and retrieved. -type EvalConfigProvider struct { - Addr addrs.AbsProviderConfig - Provider *providers.Interface - Config *configs.Provider -} - -func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { - if n.Provider == nil { - return nil, fmt.Errorf("EvalConfigProvider Provider is nil") - } - - var diags tfdiags.Diagnostics - provider := *n.Provider - config := n.Config - - configBody := buildProviderConfig(ctx, n.Addr, config) - - resp := provider.GetSchema() - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.NonFatalErr() - } - - configSchema := resp.Provider.Block - configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) - diags = diags.Append(evalDiags) - if evalDiags.HasErrors() { - return nil, diags.NonFatalErr() - } - - configDiags := ctx.ConfigureProvider(n.Addr, configVal) - configDiags = configDiags.InConfigBody(configBody) - - return nil, configDiags.ErrWithWarnings() -} - -// EvalInitProvider is an EvalNode implementation that initializes a provider -// and returns nothing. The provider can be retrieved again with the -// EvalGetProvider node. -type EvalInitProvider struct { - Addr addrs.AbsProviderConfig -} - -func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) { - return ctx.InitProvider(n.Addr) -} - -// EvalCloseProvider is an EvalNode implementation that closes provider -// connections that aren't needed anymore. -type EvalCloseProvider struct { - Addr addrs.AbsProviderConfig -} - -func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) { - ctx.CloseProvider(n.Addr) - return nil, nil -} - -// EvalGetProvider is an EvalNode implementation that retrieves an already -// initialized provider instance for the given name. -// -// Unlike most eval nodes, this takes an _absolute_ provider configuration, -// because providers can be passed into and inherited between modules. -// Resource nodes must therefore know the absolute path of the provider they -// will use, which is usually accomplished by implementing -// interface GraphNodeProviderConsumer. -type EvalGetProvider struct { - Addr addrs.AbsProviderConfig - Output *providers.Interface - - // If non-nil, Schema will be updated after eval to refer to the - // schema of the provider. 
- Schema **ProviderSchema -} - -func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) { - if n.Addr.Provider.Type == "" { - // Should never happen - panic("EvalGetProvider used with uninitialized provider configuration address") - } - - result := ctx.Provider(n.Addr) - if result == nil { - return nil, fmt.Errorf("provider %s not initialized", n.Addr) - } - - if n.Output != nil { - *n.Output = result - } - - if n.Schema != nil { - *n.Schema = ctx.ProviderSchema(n.Addr) - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go deleted file mode 100644 index bc6b5cc7..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go +++ /dev/null @@ -1,55 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/provisioners" -) - -// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner -// and returns nothing. The provisioner can be retrieved again with the -// EvalGetProvisioner node. -type EvalInitProvisioner struct { - Name string -} - -func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) { - return ctx.InitProvisioner(n.Name) -} - -// EvalCloseProvisioner is an EvalNode implementation that closes provisioner -// connections that aren't needed anymore. -type EvalCloseProvisioner struct { - Name string -} - -func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) { - ctx.CloseProvisioner(n.Name) - return nil, nil -} - -// EvalGetProvisioner is an EvalNode implementation that retrieves an already -// initialized provisioner instance for the given name. -type EvalGetProvisioner struct { - Name string - Output *provisioners.Interface - Schema **configschema.Block -} - -func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) { - result := ctx.Provisioner(n.Name) - if result == nil { - return nil, fmt.Errorf("provisioner %s not initialized", n.Name) - } - - if n.Output != nil { - *n.Output = result - } - - if n.Schema != nil { - *n.Schema = ctx.ProvisionerSchema(n.Name) - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go deleted file mode 100644 index b00f031a..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go +++ /dev/null @@ -1,293 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/plans/objchange" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// evalReadData implements shared methods and data for the individual data -// source eval nodes. -type evalReadData struct { - Addr addrs.ResourceInstance - Config *configs.Resource - Provider *providers.Interface - ProviderAddr addrs.AbsProviderConfig - ProviderMetas map[addrs.Provider]*configs.ProviderMeta - ProviderSchema **ProviderSchema - - // Planned is set when dealing with data resources that were deferred to - // the apply walk, to let us see what was planned. 
If this is set, the - // evaluation of the config is required to produce a wholly-known - // configuration which is consistent with the partial object included - // in this planned change. - Planned **plans.ResourceInstanceChange - - // State is the current state for the data source, and is updated once the - // new state has been read. - // While data sources are read-only, we need to start with the prior state - // to determine if we have a change or not. If we needed to read a new - // value, but it still matches the previous state, then we can record a - // NoNop change. If the states don't match then we record a Read change so - // that the new value is applied to the state. - State **states.ResourceInstanceObject - - // Output change records any change for this data source, which is - // interpreted differently than changes for managed resources. - // - During Refresh, this change is only used to correctly evaluate - // references to the data source, but it is not saved. - // - If a planned change has the action of plans.Read, it indicates that the - // data source could not be evaluated yet, and reading is being deferred to - // apply. - // - If planned action is plans.Update, it indicates that the data source - // was read, and the result needs to be stored in state during apply. - OutputChange **plans.ResourceInstanceChange -} - -// readDataSource handles everything needed to call ReadDataSource on the provider. -// A previously evaluated configVal can be passed in, or a new one is generated -// from the resource configuration. -func (n *evalReadData) readDataSource(ctx EvalContext, configVal cty.Value) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - var newVal cty.Value - - config := *n.Config - absAddr := n.Addr.Absolute(ctx.Path()) - - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - diags = diags.Append(fmt.Errorf("provider schema not available for %s", n.Addr)) - return newVal, diags - } - - provider := *n.Provider - - providerSchema := *n.ProviderSchema - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - diags = diags.Append(fmt.Errorf("provider %q does not support data source %q", n.ProviderAddr.Provider.String(), n.Addr.Resource.Type)) - return newVal, diags - } - - metaConfigVal, metaDiags := n.providerMetas(ctx) - diags = diags.Append(metaDiags) - if diags.HasErrors() { - return newVal, diags - } - - log.Printf("[TRACE] EvalReadData: Re-validating config for %s", absAddr) - validateResp := provider.ValidateDataSourceConfig( - providers.ValidateDataSourceConfigRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - }, - ) - if validateResp.Diagnostics.HasErrors() { - return newVal, validateResp.Diagnostics.InConfigBody(config.Config) - } - - // If we get down here then our configuration is complete and we're read - // to actually call the provider to read the data. - log.Printf("[TRACE] EvalReadData: %s configuration is complete, so reading from provider", absAddr) - - resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - ProviderMeta: metaConfigVal, - }) - diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) - if diags.HasErrors() { - return newVal, diags - } - newVal = resp.State - if newVal == cty.NilVal { - // This can happen with incompletely-configured mocks. 
We'll allow it - // and treat it as an alias for a properly-typed null value. - newVal = cty.NullVal(schema.ImpliedType()) - } - - for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.Provider.String(), tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - if diags.HasErrors() { - return newVal, diags - } - - if newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced null object", - fmt.Sprintf( - "Provider %q produced a null value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.Provider.String(), absAddr, - ), - )) - } - - if !newVal.IsNull() && !newVal.IsWhollyKnown() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced a value for %s that is not wholly known.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.Provider.String(), absAddr, - ), - )) - - // We'll still save the object, but we need to eliminate any unknown - // values first because we can't serialize them in the state file. - // Note that this may cause set elements to be coalesced if they - // differed only by having unknown values, but we don't worry about - // that here because we're saving the value only for inspection - // purposes; the error we added above will halt the graph walk. - newVal = cty.UnknownAsNull(newVal) - } - - return newVal, diags -} - -func (n *evalReadData) providerMetas(ctx EvalContext) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - metaConfigVal := cty.NullVal(cty.DynamicPseudoType) - if n.ProviderMetas != nil { - if m, ok := n.ProviderMetas[n.ProviderAddr.Provider]; ok && m != nil { - // if the provider doesn't support this feature, throw an error - if (*n.ProviderSchema).ProviderMeta == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", n.ProviderAddr.Provider.String()), - Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr), - Subject: &m.ProviderRange, - }) - } else { - var configDiags tfdiags.Diagnostics - metaConfigVal, _, configDiags = ctx.EvaluateBlock(m.Config, (*n.ProviderSchema).ProviderMeta, nil, EvalDataForNoInstanceKey) - diags = diags.Append(configDiags) - } - } - } - return metaConfigVal, diags -} - -// evalReadDataRefresh is an EvalNode implementation that handled the data -// resource lifecycle during refresh -type evalReadDataRefresh struct { - evalReadData -} - -func (n *evalReadDataRefresh) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - return nil, fmt.Errorf("provider schema not available for %s", n.Addr) - } - - absAddr := n.Addr.Absolute(ctx.Path()) - config := *n.Config - providerSchema := *n.ProviderSchema - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider %q does not support 
data source %q", n.ProviderAddr.Provider.String(), n.Addr.Resource.Type) - } - - objTy := schema.ImpliedType() - priorVal := cty.NullVal(objTy) - if n.State != nil && *n.State != nil { - priorVal = (*n.State).Value - } - - forEach, _ := evaluateForEachExpression(config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - - configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.ErrWithWarnings() - } - - configKnown := configVal.IsWhollyKnown() - // If our configuration contains any unknown values, then we must defer the - // read until plan or apply. If we've never read this data source and we - // have any depends_on, we will have to defer reading until plan to resolve - // the dependency changes. - // Assuming we can read the data source with depends_on if we have - // existing state is a compromise to prevent data sources from continually - // showing a diff. We have to make the assumption that if we have a prior - // state, since there are no prior dependency changes happening during - // refresh, that we can read this resource. If there are dependency updates - // in the config, they we be discovered in plan and the data source will be - // read again. - if !configKnown || (priorVal.IsNull() && len(n.Config.DependsOn) > 0) { - if configKnown { - log.Printf("[TRACE] evalReadDataRefresh: %s configuration is fully known, but we're forcing a read plan to be created", absAddr) - } else { - log.Printf("[TRACE] evalReadDataRefresh: %s configuration not fully known yet, so deferring to apply phase", absAddr) - } - - // We need to store a change so tat other references to this data - // source can resolve correctly, since the state is not going to be up - // to date. - *n.OutputChange = &plans.ResourceInstanceChange{ - Addr: absAddr, - ProviderAddr: n.ProviderAddr, - Change: plans.Change{ - Action: plans.Read, - Before: priorVal, - After: objchange.PlannedDataResourceObject(schema, configVal), - }, - } - - *n.State = &states.ResourceInstanceObject{ - Value: cty.NullVal(objTy), - Status: states.ObjectPlanned, - } - - return nil, diags.ErrWithWarnings() - } - - if err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreRefresh(absAddr, states.CurrentGen, priorVal) - }); err != nil { - diags = diags.Append(err) - return nil, diags.ErrWithWarnings() - } - - newVal, readDiags := n.readDataSource(ctx, configVal) - diags = diags.Append(readDiags) - if diags.HasErrors() { - return nil, diags.ErrWithWarnings() - } - - // This may still have been refreshed with references to resources that - // will be updated, but that will be caught as a change during plan. 
- *n.State = &states.ResourceInstanceObject{ - Value: newVal, - Status: states.ObjectReady, - } - - if err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(absAddr, states.CurrentGen, priorVal, newVal) - }); err != nil { - diags = diags.Append(err) - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data_apply.go deleted file mode 100644 index 888d1618..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data_apply.go +++ /dev/null @@ -1,98 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// evalReadDataApply is an EvalNode implementation that deals with the main part -// of the data resource lifecycle: either actually reading from the data source -// or generating a plan to do so. -type evalReadDataApply struct { - evalReadData -} - -func (n *evalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - - var diags tfdiags.Diagnostics - - var planned *plans.ResourceInstanceChange - if n.Planned != nil { - planned = *n.Planned - } - - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - return nil, fmt.Errorf("provider schema not available for %s", n.Addr) - } - - if planned != nil && planned.Action != plans.Read { - // If any other action gets in here then that's always a bug; this - // EvalNode only deals with reading. - return nil, fmt.Errorf( - "invalid action %s for %s: only Read is supported (this is a bug in Terraform; please report it!)", - planned.Action, absAddr, - ) - } - - if err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreApply(absAddr, states.CurrentGen, planned.Action, planned.Before, planned.After) - }); err != nil { - return nil, err - } - - // We have a change and it is complete, which means we read the data - // source during plan and only need to store it in state. 
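-	// Editorial note (not from the vendored source): IsWhollyKnown is
-	// recursive, unlike IsKnown. For example:
-	//
-	//	v := cty.TupleVal([]cty.Value{cty.UnknownVal(cty.String)})
-	//	v.IsKnown()       // true: the tuple itself is known
-	//	v.IsWhollyKnown() // false: an element inside it is unknown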
- if planned.After.IsWhollyKnown() { - if err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostApply(absAddr, states.CurrentGen, planned.After, nil) - }); err != nil { - diags = diags.Append(err) - } - - *n.State = &states.ResourceInstanceObject{ - Value: planned.After, - Status: states.ObjectReady, - } - return nil, diags.ErrWithWarnings() - } - - config := *n.Config - providerSchema := *n.ProviderSchema - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider %q does not support data source %q", n.ProviderAddr.Provider.String(), n.Addr.Resource.Type) - } - - forEach, _ := evaluateForEachExpression(config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - - configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.ErrWithWarnings() - } - - newVal, readDiags := n.readDataSource(ctx, configVal) - diags = diags.Append(readDiags) - if diags.HasErrors() { - return nil, diags.ErrWithWarnings() - } - - *n.State = &states.ResourceInstanceObject{ - Value: newVal, - Status: states.ObjectReady, - } - - if err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostApply(absAddr, states.CurrentGen, newVal, diags.Err()) - }); err != nil { - diags = diags.Append(err) - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data_plan.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data_plan.go deleted file mode 100644 index 0a6d4c43..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data_plan.go +++ /dev/null @@ -1,173 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/plans/objchange" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// evalReadDataPlan is an EvalNode implementation that deals with the main part -// of the data resource lifecycle: either actually reading from the data source -// or generating a plan to do so. -type evalReadDataPlan struct { - evalReadData - - // dependsOn stores the list of transitive resource addresses that any - // configuration depends_on references may resolve to. This is used to - // determine if there are any changes that will force this data sources to - // be deferred to apply. 
- dependsOn []addrs.ConfigResource -} - -func (n *evalReadDataPlan) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - - var diags tfdiags.Diagnostics - var configVal cty.Value - - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - return nil, fmt.Errorf("provider schema not available for %s", n.Addr) - } - - config := *n.Config - providerSchema := *n.ProviderSchema - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider %q does not support data source %q", n.ProviderAddr.Provider.String(), n.Addr.Resource.Type) - } - - objTy := schema.ImpliedType() - priorVal := cty.NullVal(objTy) - if n.State != nil && *n.State != nil { - priorVal = (*n.State).Value - } - - forEach, _ := evaluateForEachExpression(config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - - var configDiags tfdiags.Diagnostics - configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.ErrWithWarnings() - } - - configKnown := configVal.IsWhollyKnown() - // If our configuration contains any unknown values, or we depend on any - // unknown values then we must defer the read to the apply phase by - // producing a "Read" change for this resource, and a placeholder value for - // it in the state. - if n.forcePlanRead(ctx) || !configKnown { - if configKnown { - log.Printf("[TRACE] evalReadDataPlan: %s configuration is fully known, but we're forcing a read plan to be created", absAddr) - } else { - log.Printf("[TRACE] evalReadDataPlan: %s configuration not fully known yet, so deferring to apply phase", absAddr) - } - - proposedNewVal := objchange.PlannedDataResourceObject(schema, configVal) - - if err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) - }); err != nil { - diags = diags.Append(err) - return nil, diags.ErrWithWarnings() - } - - // Apply detects that the data source will need to be read by the After - // value containing unknowns from PlanDataResourceObject. - *n.OutputChange = &plans.ResourceInstanceChange{ - Addr: absAddr, - ProviderAddr: n.ProviderAddr, - Change: plans.Change{ - Action: plans.Read, - Before: priorVal, - After: proposedNewVal, - }, - } - - *n.State = &states.ResourceInstanceObject{ - Value: cty.NullVal(objTy), - Status: states.ObjectPlanned, - } - - if err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(absAddr, states.CurrentGen, plans.Read, priorVal, proposedNewVal) - }); err != nil { - diags = diags.Append(err) - } - - return nil, diags.ErrWithWarnings() - } - - // If we have a stored state we may not need to re-read the data source. - // Check the config against the state to see if there are any difference. - if !priorVal.IsNull() { - // Applying the configuration to the prior state lets us see if there - // are any differences. 
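-	// Editorial note (not from the vendored source):
-	// objchange.ProposedNewObject combines the computed attribute values
-	// from the prior state with the attribute values set in configuration,
-	// so a result equal to priorVal means the configuration introduces no
-	// changes and the stored state can be reused as-is.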
- proposed := objchange.ProposedNewObject(schema, priorVal, configVal) - if proposed.Equals(priorVal).True() { - log.Printf("[TRACE] evalReadDataPlan: %s no change detected, using existing state", absAddr) - // state looks up to date, and must have been read during refresh - return nil, diags.ErrWithWarnings() - } - } - - newVal, readDiags := n.readDataSource(ctx, configVal) - diags = diags.Append(readDiags) - if diags.HasErrors() { - return nil, diags.ErrWithWarnings() - } - - // The returned value from ReadDataSource must be non-nil and known, - // which we store in the change. Apply will use the fact that the After - // value is wholly kown to save the state directly, rather than reading the - // data source again. - *n.OutputChange = &plans.ResourceInstanceChange{ - Addr: absAddr, - ProviderAddr: n.ProviderAddr, - Change: plans.Change{ - Action: plans.Read, - Before: priorVal, - After: newVal, - }, - } - - *n.State = &states.ResourceInstanceObject{ - Value: newVal, - Status: states.ObjectPlanned, - } - - if err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(absAddr, states.CurrentGen, plans.Update, priorVal, newVal) - }); err != nil { - return nil, err - } - - return nil, diags.ErrWithWarnings() -} - -// forcePlanRead determines if we need to override the usual behavior of -// immediately reading from the data source where possible, instead forcing us -// to generate a plan. -func (n *evalReadDataPlan) forcePlanRead(ctx EvalContext) bool { - // Check and see if any depends_on dependencies have - // changes, since they won't show up as changes in the - // configuration. - changes := ctx.Changes() - for _, d := range n.dependsOn { - for _, change := range changes.GetChangesForConfigResource(d) { - if change != nil && change.Action != plans.NoOp { - return true - } - } - } - return false -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go deleted file mode 100644 index d56291c1..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go +++ /dev/null @@ -1,137 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalRefresh is an EvalNode implementation that does a refresh for -// a resource. 
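-//
-// A wiring sketch (editorial illustration, not from the vendored source;
-// providerAddr and addr are assumed to be in scope):
-//
-//	var provider providers.Interface
-//	var schema *ProviderSchema
-//	var state, output *states.ResourceInstanceObject
-//	seq := &EvalSequence{Nodes: []EvalNode{
-//		&EvalGetProvider{Addr: providerAddr, Output: &provider, Schema: &schema},
-//		&EvalRefresh{
-//			Addr:           addr,
-//			ProviderAddr:   providerAddr,
-//			Provider:       &provider,
-//			ProviderSchema: &schema,
-//			State:          &state,
-//			Output:         &output,
-//		},
-//	}}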
-type EvalRefresh struct { - Addr addrs.ResourceInstance - ProviderAddr addrs.AbsProviderConfig - Provider *providers.Interface - ProviderMetas map[addrs.Provider]*configs.ProviderMeta - ProviderSchema **ProviderSchema - State **states.ResourceInstanceObject - Output **states.ResourceInstanceObject -} - -// TODO: test -func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - absAddr := n.Addr.Absolute(ctx.Path()) - - var diags tfdiags.Diagnostics - - // If we have no state, we don't do any refreshing - if state == nil { - log.Printf("[DEBUG] refresh: %s: no state, so not refreshing", n.Addr.Absolute(ctx.Path())) - return nil, diags.ErrWithWarnings() - } - - schema, _ := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - metaConfigVal := cty.NullVal(cty.DynamicPseudoType) - if n.ProviderMetas != nil { - if m, ok := n.ProviderMetas[n.ProviderAddr.Provider]; ok && m != nil { - log.Printf("[DEBUG] EvalRefresh: ProviderMeta config value set") - // if the provider doesn't support this feature, throw an error - if (*n.ProviderSchema).ProviderMeta == nil { - log.Printf("[DEBUG] EvalRefresh: no ProviderMeta schema") - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", n.ProviderAddr.Provider.String()), - Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr), - Subject: &m.ProviderRange, - }) - } else { - log.Printf("[DEBUG] EvalRefresh: ProviderMeta schema found: %+v", (*n.ProviderSchema).ProviderMeta) - var configDiags tfdiags.Diagnostics - metaConfigVal, _, configDiags = ctx.EvaluateBlock(m.Config, (*n.ProviderSchema).ProviderMeta, nil, EvalDataForNoInstanceKey) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - } - } - } - - // Call pre-refresh hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreRefresh(absAddr, states.CurrentGen, state.Value) - }) - if err != nil { - return nil, diags.ErrWithWarnings() - } - - // Refresh! - priorVal := state.Value - req := providers.ReadResourceRequest{ - TypeName: n.Addr.Resource.Type, - PriorState: priorVal, - Private: state.Private, - ProviderMeta: metaConfigVal, - } - - provider := *n.Provider - resp := provider.ReadResource(req) - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.Err() - } - - if resp.NewState == cty.NilVal { - // This ought not to happen in real cases since it's not possible to - // send NilVal over the plugin RPC channel, but it can come up in - // tests due to sloppy mocking. 
- panic("new state is cty.NilVal") - } - - for _, err := range resp.NewState.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q planned an invalid value for %s during refresh: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.Provider.String(), absAddr, tfdiags.FormatError(err), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - - newState := state.DeepCopy() - newState.Value = resp.NewState - newState.Private = resp.Private - newState.Dependencies = state.Dependencies - newState.CreateBeforeDestroy = state.CreateBeforeDestroy - - // Call post-refresh hook - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(absAddr, states.CurrentGen, priorVal, newState.Value) - }) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = newState - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go deleted file mode 100644 index 3485e4f1..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go +++ /dev/null @@ -1,42 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalSequence is an EvalNode that evaluates in sequence. -type EvalSequence struct { - Nodes []EvalNode -} - -func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - for _, n := range n.Nodes { - if n == nil { - continue - } - - if _, err := EvalRaw(n, ctx); err != nil { - if _, isEarlyExit := err.(EvalEarlyExitError); isEarlyExit { - // In this path we abort early, losing any non-error - // diagnostics we saw earlier. - return nil, err - } - diags = diags.Append(err) - if diags.HasErrors() { - // Halt if we get some errors, but warnings are okay. - break - } - } - } - - return nil, diags.ErrWithWarnings() -} - -// EvalNodeFilterable impl. -func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) { - for i, node := range n.Nodes { - n.Nodes[i] = fn(node) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go deleted file mode 100644 index eca68056..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go +++ /dev/null @@ -1,585 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "sort" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalReadState is an EvalNode implementation that reads the -// current object for a specific instance in the state. -type EvalReadState struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // ProviderSchema is the schema for the provider given in Provider. - ProviderSchema **ProviderSchema - - // Provider is the provider that will subsequently perform actions on - // the the state object. This is used to perform any schema upgrades - // that might be required to prepare the stored data for use. - Provider *providers.Interface - - // Output will be written with a pointer to the retrieved object. 
- Output **states.ResourceInstanceObject -} - -func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) { - if n.Provider == nil || *n.Provider == nil { - panic("EvalReadState used with no Provider object") - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - panic("EvalReadState used with no ProviderSchema object") - } - - absAddr := n.Addr.Absolute(ctx.Path()) - log.Printf("[TRACE] EvalReadState: reading state for %s", absAddr) - - src := ctx.State().ResourceInstanceObject(absAddr, states.CurrentGen) - if src == nil { - // Presumably we only have deposed objects, then. - log.Printf("[TRACE] EvalReadState: no state present for %s", absAddr) - return nil, nil - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Shouldn't happen since we should've failed long ago if no schema is present - return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) - } - var diags tfdiags.Diagnostics - src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) - if diags.HasErrors() { - // Note that we don't have any channel to return warnings here. We'll - // accept that for now since warnings during a schema upgrade would - // be pretty weird anyway, since this operation is supposed to seem - // invisible to the user. - return nil, diags.Err() - } - - obj, err := src.Decode(schema.ImpliedType()) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = obj - } - return obj, nil -} - -// EvalReadStateDeposed is an EvalNode implementation that reads the -// deposed InstanceState for a specific resource out of the state -type EvalReadStateDeposed struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // Key identifies which deposed object we will read. - Key states.DeposedKey - - // ProviderSchema is the schema for the provider given in Provider. - ProviderSchema **ProviderSchema - - // Provider is the provider that will subsequently perform actions on - // the the state object. This is used to perform any schema upgrades - // that might be required to prepare the stored data for use. - Provider *providers.Interface - - // Output will be written with a pointer to the retrieved object. - Output **states.ResourceInstanceObject -} - -func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) { - if n.Provider == nil || *n.Provider == nil { - panic("EvalReadStateDeposed used with no Provider object") - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - panic("EvalReadStateDeposed used with no ProviderSchema object") - } - - key := n.Key - if key == states.NotDeposed { - return nil, fmt.Errorf("EvalReadStateDeposed used with no instance key; this is a bug in Terraform and should be reported") - } - absAddr := n.Addr.Absolute(ctx.Path()) - log.Printf("[TRACE] EvalReadStateDeposed: reading state for %s deposed object %s", absAddr, n.Key) - - src := ctx.State().ResourceInstanceObject(absAddr, key) - if src == nil { - // Presumably we only have deposed objects, then. 
- log.Printf("[TRACE] EvalReadStateDeposed: no state present for %s deposed object %s", absAddr, n.Key) - return nil, nil - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Shouldn't happen since we should've failed long ago if no schema is present - return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) - } - var diags tfdiags.Diagnostics - src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) - if diags.HasErrors() { - // Note that we don't have any channel to return warnings here. We'll - // accept that for now since warnings during a schema upgrade would - // be pretty weird anyway, since this operation is supposed to seem - // invisible to the user. - return nil, diags.Err() - } - - obj, err := src.Decode(schema.ImpliedType()) - if err != nil { - return nil, err - } - if n.Output != nil { - *n.Output = obj - } - return obj, nil -} - -// EvalRequireState is an EvalNode implementation that exits early if the given -// object is null. -type EvalRequireState struct { - State **states.ResourceInstanceObject -} - -func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil { - return nil, EvalEarlyExitError{} - } - - state := *n.State - if state == nil || state.Value.IsNull() { - return nil, EvalEarlyExitError{} - } - - return nil, nil -} - -// EvalUpdateStateHook is an EvalNode implementation that calls the -// PostStateUpdate hook with the current state. -type EvalUpdateStateHook struct{} - -func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) { - // In principle we could grab the lock here just long enough to take a - // deep copy and then pass that to our hooks below, but we'll instead - // hold the hook for the duration to avoid the potential confusing - // situation of us racing to call PostStateUpdate concurrently with - // different state snapshots. - stateSync := ctx.State() - state := stateSync.Lock().DeepCopy() - defer stateSync.Unlock() - - // Call the hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostStateUpdate(state) - }) - if err != nil { - return nil, err - } - - return nil, nil -} - -// evalWriteEmptyState wraps EvalWriteState to specifically record an empty -// state for a particular object. -type evalWriteEmptyState struct { - EvalWriteState -} - -func (n *evalWriteEmptyState) Eval(ctx EvalContext) (interface{}, error) { - var state *states.ResourceInstanceObject - n.State = &state - return n.EvalWriteState.Eval(ctx) -} - -// EvalWriteState is an EvalNode implementation that saves the given object -// as the current object for the selected resource instance. -type EvalWriteState struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // State is the object state to save. - State **states.ResourceInstanceObject - - // ProviderSchema is the schema for the provider given in ProviderAddr. - ProviderSchema **ProviderSchema - - // ProviderAddr is the address of the provider configuration that - // produced the given object. - ProviderAddr addrs.AbsProviderConfig - - // Dependencies are the inter-resource dependencies to be stored in the - // state. 
-	Dependencies *[]addrs.ConfigResource
-}
-
-func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) {
-	if n.State == nil {
-		// Note that a pointer _to_ nil is valid here, indicating the total
-		// absence of an object as we'd see during destroy.
-		panic("EvalWriteState used with no ResourceInstanceObject")
-	}
-
-	absAddr := n.Addr.Absolute(ctx.Path())
-	state := ctx.State()
-
-	if n.ProviderAddr.Provider.Type == "" {
-		return nil, fmt.Errorf("failed to write state for %s: missing provider type", absAddr)
-	}
-	obj := *n.State
-	if obj == nil || obj.Value.IsNull() {
-		// No need to encode anything: we'll just write it directly.
-		state.SetResourceInstanceCurrent(absAddr, nil, n.ProviderAddr)
-		log.Printf("[TRACE] EvalWriteState: removing state object for %s", absAddr)
-		return nil, nil
-	}
-
-	// store the new deps in the state
-	if n.Dependencies != nil {
-		log.Printf("[TRACE] EvalWriteState: recording %d dependencies for %s", len(*n.Dependencies), absAddr)
-		obj.Dependencies = *n.Dependencies
-	}
-
-	if n.ProviderSchema == nil || *n.ProviderSchema == nil {
-		// Should never happen, unless our state object is nil
-		panic("EvalWriteState used with pointer to nil ProviderSchema object")
-	}
-
-	if obj != nil {
-		log.Printf("[TRACE] EvalWriteState: writing current state object for %s", absAddr)
-	} else {
-		log.Printf("[TRACE] EvalWriteState: removing current state object for %s", absAddr)
-	}
-
-	schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource())
-	if schema == nil {
-		// It shouldn't be possible to get this far in any real scenario
-		// without a schema, but we might end up here in contrived tests that
-		// fail to set up their world properly.
-		return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr)
-	}
-	src, err := obj.Encode(schema.ImpliedType(), currentVersion)
-	if err != nil {
-		return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err)
-	}
-
-	state.SetResourceInstanceCurrent(absAddr, src, n.ProviderAddr)
-	return nil, nil
-}
-
-// EvalWriteStateDeposed is an EvalNode implementation that writes
-// an InstanceState out to the Deposed list of a resource in the state.
-type EvalWriteStateDeposed struct {
-	// Addr is the address of the instance to read state for.
-	Addr addrs.ResourceInstance
-
-	// Key indicates which deposed object to write to.
-	Key states.DeposedKey
-
-	// State is the object state to save.
-	State **states.ResourceInstanceObject
-
-	// ProviderSchema is the schema for the provider given in ProviderAddr.
-	ProviderSchema **ProviderSchema
-
-	// ProviderAddr is the address of the provider configuration that
-	// produced the given object.
-	ProviderAddr addrs.AbsProviderConfig
-}
-
-func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
-	if n.State == nil {
-		// Note that a pointer _to_ nil is valid here, indicating the total
-		// absence of an object as we'd see during destroy.
-		panic("EvalWriteStateDeposed used with no ResourceInstanceObject")
-	}
-
-	absAddr := n.Addr.Absolute(ctx.Path())
-	key := n.Key
-	state := ctx.State()
-
-	if key == states.NotDeposed {
-		// should never happen
-		return nil, fmt.Errorf("can't save deposed object for %s without a deposed key; this is a bug in Terraform that should be reported", absAddr)
-	}
-
-	obj := *n.State
-	if obj == nil {
-		// No need to encode anything: we'll just write it directly.
- state.SetResourceInstanceDeposed(absAddr, key, nil, n.ProviderAddr)
- log.Printf("[TRACE] EvalWriteStateDeposed: removing state object for %s deposed %s", absAddr, key)
- return nil, nil
- }
- if n.ProviderSchema == nil || *n.ProviderSchema == nil {
- // Should never happen, unless our state object is nil
- panic("EvalWriteStateDeposed used with no ProviderSchema object")
- }
-
- schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource())
- if schema == nil {
- // It shouldn't be possible to get this far in any real scenario
- // without a schema, but we might end up here in contrived tests that
- // fail to set up their world properly.
- return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr)
- }
- src, err := obj.Encode(schema.ImpliedType(), currentVersion)
- if err != nil {
- return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err)
- }
-
- log.Printf("[TRACE] EvalWriteStateDeposed: writing state object for %s deposed %s", absAddr, key)
- state.SetResourceInstanceDeposed(absAddr, key, src, n.ProviderAddr)
- return nil, nil
-}
-
-// EvalDeposeState is an EvalNode implementation that moves the current object
-// for the given instance to instead be a deposed object, leaving the instance
-// with no current object.
-// This is used at the beginning of a create-before-destroy replace action so
-// that the create can create while preserving the old state of the
-// to-be-destroyed object.
-type EvalDeposeState struct {
- Addr addrs.ResourceInstance
-
- // ForceKey, if a value other than states.NotDeposed, will be used as the
- // key for the newly-created deposed object that results from this action.
- // If set to states.NotDeposed (the zero value), a new unique key will be
- // allocated.
- ForceKey states.DeposedKey
-
- // OutputKey, if non-nil, will be written with the deposed object key that
- // was generated for the object. This can then be passed to
- // EvalUndeposeState.Key so it knows which deposed instance to forget.
- OutputKey *states.DeposedKey
-}
-
-// TODO: test
-func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) {
- absAddr := n.Addr.Absolute(ctx.Path())
- state := ctx.State()
-
- var key states.DeposedKey
- if n.ForceKey == states.NotDeposed {
- key = state.DeposeResourceInstanceObject(absAddr)
- } else {
- key = n.ForceKey
- state.DeposeResourceInstanceObjectForceKey(absAddr, key)
- }
- log.Printf("[TRACE] EvalDeposeState: prior object for %s now deposed with key %s", absAddr, key)
-
- if n.OutputKey != nil {
- *n.OutputKey = key
- }
-
- return nil, nil
-}
-
-// EvalMaybeRestoreDeposedObject is an EvalNode implementation that will
-// restore a particular deposed object of the specified resource instance
-// to be the "current" object if and only if the instance doesn't currently
-// have a current object.
-//
-// This is intended for use when the create leg of a create before destroy
-// fails with no partial new object: if we didn't take any action, the user
-// would be left in the unfortunate situation of having no current object
-// and the previously-working object now deposed. This EvalNode causes a
-// better outcome by restoring things to how they were before the replace
-// operation began.
-//
-// The create operation may have produced a partial result even though it
-// failed and it's important that we don't "forget" that state, so in that
-// situation the prior object remains deposed and the partial new object
-// remains the current object, allowing the situation to hopefully be
-// improved in a subsequent run.
-type EvalMaybeRestoreDeposedObject struct {
- Addr addrs.ResourceInstance
-
- // PlannedChange might be the action we're performing that includes
- // the possibility of restoring a deposed object. However, it might also
- // be nil. It's here only for use in error messages and must not be
- // used for business logic.
- PlannedChange **plans.ResourceInstanceChange
-
- // Key is a pointer to the deposed object key that should be forgotten
- // from the state, which must be non-nil.
- Key *states.DeposedKey
-}
-
-// TODO: test
-func (n *EvalMaybeRestoreDeposedObject) Eval(ctx EvalContext) (interface{}, error) {
- absAddr := n.Addr.Absolute(ctx.Path())
- dk := *n.Key
- state := ctx.State()
-
- if dk == states.NotDeposed {
- // This should never happen, and so it always indicates a bug.
- // We should evaluate this node only if we've previously deposed
- // an object as part of the same operation.
- var diags tfdiags.Diagnostics
- if n.PlannedChange != nil && *n.PlannedChange != nil {
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Attempt to restore non-existent deposed object",
- fmt.Sprintf(
- "Terraform has encountered a bug where it would need to restore a deposed object for %s without knowing a deposed object key for that object. This occurred during a %s action. This is a bug in Terraform; please report it!",
- absAddr, (*n.PlannedChange).Action,
- ),
- ))
- } else {
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Attempt to restore non-existent deposed object",
- fmt.Sprintf(
- "Terraform has encountered a bug where it would need to restore a deposed object for %s without knowing a deposed object key for that object. This is a bug in Terraform; please report it!",
- absAddr,
- ),
- ))
- }
- return nil, diags.Err()
- }
-
- restored := state.MaybeRestoreResourceInstanceDeposed(absAddr, dk)
- if restored {
- log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s was restored as the current object", absAddr, dk)
- } else {
- log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s remains deposed", absAddr, dk)
- }
-
- return nil, nil
-}
-
-// EvalWriteResourceState is an EvalNode implementation that ensures that
-// a suitable resource-level state record is present in the state, if that's
-// required for the "each mode" of that resource.
-//
-// This is important primarily for the situation where count = 0, since this
-// eval is the only chance we get to set the resource "each mode" to list
-// in that case, allowing expression evaluation to see it as a zero-element
-// list rather than as not set at all.
-type EvalWriteResourceState struct {
- Addr addrs.AbsResource
- Config *configs.Resource
- ProviderAddr addrs.AbsProviderConfig
-}
-
-func (n *EvalWriteResourceState) Eval(ctx EvalContext) (interface{}, error) {
- var diags tfdiags.Diagnostics
- state := ctx.State()
-
- // We'll record our expansion decision in the shared "expander" object
- // so that later operations (i.e. DynamicExpand and expression evaluation)
- // can refer to it. Since this node represents the abstract module, we need
- // to expand the module here to create all resources.
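/*
A minimal sketch (hypothetical arguments) of what the branching below
records in the expander for each repetition mode:

    expander.SetResourceCount(modAddr, rAddr, 3)   // keys [0], [1], [2]
    expander.SetResourceForEach(modAddr, rAddr, m) // keys ["a"], ["b"] for m = {"a": ..., "b": ...}
    expander.SetResourceSingle(modAddr, rAddr)     // the single key-less instance

In particular, count = 0 or an empty for_each map still records the
resource itself, which is what lets expression evaluation see an empty
list or map rather than an entirely unset resource.
*/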
- expander := ctx.InstanceExpander()
-
- switch {
- case n.Config.Count != nil:
- count, countDiags := evaluateCountExpression(n.Config.Count, ctx)
- diags = diags.Append(countDiags)
- if countDiags.HasErrors() {
- return nil, diags.Err()
- }
-
- state.SetResourceProvider(n.Addr, n.ProviderAddr)
- expander.SetResourceCount(n.Addr.Module, n.Addr.Resource, count)
-
- case n.Config.ForEach != nil:
- forEach, forEachDiags := evaluateForEachExpression(n.Config.ForEach, ctx)
- diags = diags.Append(forEachDiags)
- if forEachDiags.HasErrors() {
- return nil, diags.Err()
- }
-
- // This method takes care of all of the business logic of updating this
- // while ensuring that any existing instances are preserved, etc.
- state.SetResourceProvider(n.Addr, n.ProviderAddr)
- expander.SetResourceForEach(n.Addr.Module, n.Addr.Resource, forEach)
-
- default:
- state.SetResourceProvider(n.Addr, n.ProviderAddr)
- expander.SetResourceSingle(n.Addr.Module, n.Addr.Resource)
- }
-
- return nil, nil
-}
-
-// EvalForgetResourceState is an EvalNode implementation that prunes out an
-// empty resource-level state for a given resource address, or produces an
-// error if it isn't empty after all.
-//
-// This should be the last action taken for a resource that has been removed
-// from the configuration altogether, to clean up the leftover husk of the
-// resource in the state after other EvalNodes have destroyed and removed
-// all of the instances and instance objects beneath it.
-type EvalForgetResourceState struct {
- Addr addrs.Resource
-}
-
-func (n *EvalForgetResourceState) Eval(ctx EvalContext) (interface{}, error) {
- absAddr := n.Addr.Absolute(ctx.Path())
- state := ctx.State()
-
- pruned := state.RemoveResourceIfEmpty(absAddr)
- if !pruned {
- // If this produces an error, it indicates a bug elsewhere in Terraform
- // -- probably missing graph nodes, graph edges, or
- // incorrectly-implemented evaluation steps.
- return nil, fmt.Errorf("orphan resource %s still has a non-empty state after apply; this is a bug in Terraform", absAddr)
- }
- log.Printf("[TRACE] EvalForgetResourceState: Pruned husk of %s from state", absAddr)
-
- return nil, nil
-}
-
-// EvalRefreshDependencies is an EvalNode implementation that appends any newly
-// found dependencies to those saved in the state. The existing dependencies
-// are retained, as they may be missing from the config, and will be required
-// for the updates and destroys during the next apply.
-type EvalRefreshDependencies struct {
- // Prior State
- State **states.ResourceInstanceObject
- // Dependencies to write to the new state
- Dependencies *[]addrs.ConfigResource
-}
-
-func (n *EvalRefreshDependencies) Eval(ctx EvalContext) (interface{}, error) {
- state := *n.State
- if state == nil {
- // no existing state to append
- return nil, nil
- }
-
- depMap := make(map[string]addrs.ConfigResource)
- for _, d := range *n.Dependencies {
- depMap[d.String()] = d
- }
-
- // We already have dependencies in state, so we need to trust those for
- // refresh. We can't write out new dependencies until apply time in case
- // the configuration has been changed in a manner that conflicts with the
- // stored dependencies.
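/*
A brief illustration (hypothetical scenario) of why the stored
dependencies are preferred during refresh below: if aws_instance.b
depended on aws_instance.a and both were then removed from the
configuration, the dependencies recorded in state are the only remaining
record that b must still be destroyed before a, so refresh must not
replace them with the (now empty) set derived from the current config.
*/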
- if len(state.Dependencies) > 0 { - *n.Dependencies = state.Dependencies - return nil, nil - } - - deps := make([]addrs.ConfigResource, 0, len(depMap)) - for _, d := range depMap { - deps = append(deps, d) - } - - sort.Slice(deps, func(i, j int) bool { - return deps[i].String() < deps[j].String() - }) - - *n.Dependencies = deps - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go deleted file mode 100644 index acf671c6..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go +++ /dev/null @@ -1,107 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// UpgradeResourceState will, if necessary, run the provider-defined upgrade -// logic against the given state object to make it compliant with the -// current schema version. This is a no-op if the given state object is -// already at the latest version. -// -// If any errors occur during upgrade, error diagnostics are returned. In that -// case it is not safe to proceed with using the original state object. -func UpgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Interface, src *states.ResourceInstanceObjectSrc, currentSchema *configschema.Block, currentVersion uint64) (*states.ResourceInstanceObjectSrc, tfdiags.Diagnostics) { - if addr.Resource.Resource.Mode != addrs.ManagedResourceMode { - // We only do state upgrading for managed resources. - return src, nil - } - - stateIsFlatmap := len(src.AttrsJSON) == 0 - - // TODO: This should eventually use a proper FQN. - providerType := addr.Resource.Resource.ImpliedProvider() - if src.SchemaVersion > currentVersion { - log.Printf("[TRACE] UpgradeResourceState: can't downgrade state for %s from version %d to %d", addr, src.SchemaVersion, currentVersion) - var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Resource instance managed by newer provider version", - // This is not a very good error message, but we don't retain enough - // information in state to give good feedback on what provider - // version might be required here. :( - fmt.Sprintf("The current state of %s was created by a newer provider version than is currently selected. Upgrade the %s provider to work with this state.", addr, providerType), - )) - return nil, diags - } - - // If we get down here then we need to upgrade the state, with the - // provider's help. - // If this state was originally created by a version of Terraform prior to - // v0.12, this also includes translating from legacy flatmap to new-style - // representation, since only the provider has enough information to - // understand a flatmap built against an older schema. 
- if src.SchemaVersion != currentVersion { - log.Printf("[TRACE] UpgradeResourceState: upgrading state for %s from version %d to %d using provider %q", addr, src.SchemaVersion, currentVersion, providerType) - } else { - log.Printf("[TRACE] UpgradeResourceState: schema version of %s is still %d; calling provider %q for any other minor fixups", addr, currentVersion, providerType) - } - - req := providers.UpgradeResourceStateRequest{ - TypeName: addr.Resource.Resource.Type, - - // TODO: The internal schema version representations are all using - // uint64 instead of int64, but unsigned integers aren't friendly - // to all protobuf target languages so in practice we use int64 - // on the wire. In future we will change all of our internal - // representations to int64 too. - Version: int64(src.SchemaVersion), - } - - if stateIsFlatmap { - req.RawStateFlatmap = src.AttrsFlat - } else { - req.RawStateJSON = src.AttrsJSON - } - - resp := provider.UpgradeResourceState(req) - diags := resp.Diagnostics - if diags.HasErrors() { - return nil, diags - } - - // After upgrading, the new value must conform to the current schema. When - // going over RPC this is actually already ensured by the - // marshaling/unmarshaling of the new value, but we'll check it here - // anyway for robustness, e.g. for in-process providers. - newValue := resp.UpgradedState - if errs := newValue.Type().TestConformance(currentSchema.ImpliedType()); len(errs) > 0 { - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource state upgrade", - fmt.Sprintf("The %s provider upgraded the state for %s from a previous version, but produced an invalid result: %s.", providerType, addr, tfdiags.FormatError(err)), - )) - } - return nil, diags - } - - new, err := src.CompleteUpgrade(newValue, currentSchema.ImpliedType(), uint64(currentVersion)) - if err != nil { - // We already checked for type conformance above, so getting into this - // codepath should be rare and is probably a bug somewhere under CompleteUpgrade. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to encode result of resource state upgrade", - fmt.Sprintf("Failed to encode state for %s after resource schema upgrade: %s.", addr, tfdiags.FormatError(err)), - )) - } - return new, diags -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go deleted file mode 100644 index 00b80d37..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go +++ /dev/null @@ -1,624 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/gocty" -) - -// EvalValidateCount is an EvalNode implementation that validates -// the count of a resource. 
-type EvalValidateCount struct { - Resource *configs.Resource -} - -// TODO: test -func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - var count int - var err error - - val, valDiags := ctx.EvaluateExpr(n.Resource.Count, cty.Number, nil) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - goto RETURN - } - if val.IsNull() || !val.IsKnown() { - goto RETURN - } - - err = gocty.FromCtyValue(val, &count) - if err != nil { - // The EvaluateExpr call above already guaranteed us a number value, - // so if we end up here then we have something that is out of range - // for an int, and the error message will include a description of - // the valid range. - rawVal := val.AsBigFloat() - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count value", - Detail: fmt.Sprintf("The number %s is not a valid count value: %s.", rawVal, err), - Subject: n.Resource.Count.Range().Ptr(), - }) - } else if count < 0 { - rawVal := val.AsBigFloat() - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count value", - Detail: fmt.Sprintf("The number %s is not a valid count value: count must not be negative.", rawVal), - Subject: n.Resource.Count.Range().Ptr(), - }) - } - -RETURN: - return nil, diags.NonFatalErr() -} - -// EvalValidateProvider is an EvalNode implementation that validates -// a provider configuration. -type EvalValidateProvider struct { - Addr addrs.AbsProviderConfig - Provider *providers.Interface - Config *configs.Provider -} - -func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - provider := *n.Provider - - configBody := buildProviderConfig(ctx, n.Addr, n.Config) - - resp := provider.GetSchema() - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.NonFatalErr() - } - - configSchema := resp.Provider.Block - if configSchema == nil { - // Should never happen in real code, but often comes up in tests where - // mock schemas are being used that tend to be incomplete. - log.Printf("[WARN] EvalValidateProvider: no config schema is available for %s, so using empty schema", n.Addr) - configSchema = &configschema.Block{} - } - - configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) - diags = diags.Append(evalDiags) - if evalDiags.HasErrors() { - return nil, diags.NonFatalErr() - } - - req := providers.PrepareProviderConfigRequest{ - Config: configVal, - } - - validateResp := provider.PrepareProviderConfig(req) - diags = diags.Append(validateResp.Diagnostics) - - return nil, diags.NonFatalErr() -} - -// EvalValidateProvisioner is an EvalNode implementation that validates -// the configuration of a provisioner belonging to a resource. The provisioner -// config is expected to contain the merged connection configurations. 
-type EvalValidateProvisioner struct { - ResourceAddr addrs.Resource - Provisioner *provisioners.Interface - Schema **configschema.Block - Config *configs.Provisioner - ResourceHasCount bool - ResourceHasForEach bool -} - -func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) { - provisioner := *n.Provisioner - config := *n.Config - schema := *n.Schema - - var diags tfdiags.Diagnostics - - { - // Validate the provisioner's own config first - - configVal, _, configDiags := n.evaluateBlock(ctx, config.Config, schema) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - - if configVal == cty.NilVal { - // Should never happen for a well-behaved EvaluateBlock implementation - return nil, fmt.Errorf("EvaluateBlock returned nil value") - } - - req := provisioners.ValidateProvisionerConfigRequest{ - Config: configVal, - } - - resp := provisioner.ValidateProvisionerConfig(req) - diags = diags.Append(resp.Diagnostics) - } - - { - // Now validate the connection config, which contains the merged bodies - // of the resource and provisioner connection blocks. - connDiags := n.validateConnConfig(ctx, config.Connection, n.ResourceAddr) - diags = diags.Append(connDiags) - } - - return nil, diags.NonFatalErr() -} - -func (n *EvalValidateProvisioner) validateConnConfig(ctx EvalContext, config *configs.Connection, self addrs.Referenceable) tfdiags.Diagnostics { - // We can't comprehensively validate the connection config since its - // final structure is decided by the communicator and we can't instantiate - // that until we have a complete instance state. However, we *can* catch - // configuration keys that are not valid for *any* communicator, catching - // typos early rather than waiting until we actually try to run one of - // the resource's provisioners. - - var diags tfdiags.Diagnostics - - if config == nil || config.Config == nil { - // No block to validate - return diags - } - - // We evaluate here just by evaluating the block and returning any - // diagnostics we get, since evaluation alone is enough to check for - // extraneous arguments and incorrectly-typed arguments. - _, _, configDiags := n.evaluateBlock(ctx, config.Config, connectionBlockSupersetSchema) - diags = diags.Append(configDiags) - - return diags -} - -func (n *EvalValidateProvisioner) evaluateBlock(ctx EvalContext, body hcl.Body, schema *configschema.Block) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - keyData := EvalDataForNoInstanceKey - selfAddr := n.ResourceAddr.Instance(addrs.NoKey) - - if n.ResourceHasCount { - // For a resource that has count, we allow count.index but don't - // know at this stage what it will return. - keyData = InstanceKeyEvalData{ - CountIndex: cty.UnknownVal(cty.Number), - } - - // "self" can't point to an unknown key, but we'll force it to be - // key 0 here, which should return an unknown value of the - // expected type since none of these elements are known at this - // point anyway. - selfAddr = n.ResourceAddr.Instance(addrs.IntKey(0)) - } else if n.ResourceHasForEach { - // For a resource that has for_each, we allow each.value and each.key - // but don't know at this stage what it will return. - keyData = InstanceKeyEvalData{ - EachKey: cty.UnknownVal(cty.String), - EachValue: cty.DynamicVal, - } - - // "self" can't point to an unknown key, but we'll force it to be - // key "" here, which should return an unknown value of the - // expected type since none of these elements are known at - // this point anyway. 
- selfAddr = n.ResourceAddr.Instance(addrs.StringKey("")) - } - - return ctx.EvaluateBlock(body, schema, selfAddr, keyData) -} - -// connectionBlockSupersetSchema is a schema representing the superset of all -// possible arguments for "connection" blocks across all supported connection -// types. -// -// This currently lives here because we've not yet updated our communicator -// subsystem to be aware of schema itself. Once that is done, we can remove -// this and use a type-specific schema from the communicator to validate -// exactly what is expected for a given connection type. -var connectionBlockSupersetSchema = &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - // NOTE: "type" is not included here because it's treated special - // by the config loader and stored away in a separate field. - - // Common attributes for both connection types - "host": { - Type: cty.String, - Required: true, - }, - "type": { - Type: cty.String, - Optional: true, - }, - "user": { - Type: cty.String, - Optional: true, - }, - "password": { - Type: cty.String, - Optional: true, - }, - "port": { - Type: cty.String, - Optional: true, - }, - "timeout": { - Type: cty.String, - Optional: true, - }, - "script_path": { - Type: cty.String, - Optional: true, - }, - - // For type=ssh only (enforced in ssh communicator) - "private_key": { - Type: cty.String, - Optional: true, - }, - "certificate": { - Type: cty.String, - Optional: true, - }, - "host_key": { - Type: cty.String, - Optional: true, - }, - "agent": { - Type: cty.Bool, - Optional: true, - }, - "agent_identity": { - Type: cty.String, - Optional: true, - }, - "bastion_host": { - Type: cty.String, - Optional: true, - }, - "bastion_host_key": { - Type: cty.String, - Optional: true, - }, - "bastion_port": { - Type: cty.Number, - Optional: true, - }, - "bastion_user": { - Type: cty.String, - Optional: true, - }, - "bastion_password": { - Type: cty.String, - Optional: true, - }, - "bastion_private_key": { - Type: cty.String, - Optional: true, - }, - "bastion_certificate": { - Type: cty.String, - Optional: true, - }, - - // For type=winrm only (enforced in winrm communicator) - "https": { - Type: cty.Bool, - Optional: true, - }, - "insecure": { - Type: cty.Bool, - Optional: true, - }, - "cacert": { - Type: cty.String, - Optional: true, - }, - "use_ntlm": { - Type: cty.Bool, - Optional: true, - }, - }, -} - -// connectionBlockSupersetSchema is a schema representing the superset of all -// possible arguments for "connection" blocks across all supported connection -// types. -// -// This currently lives here because we've not yet updated our communicator -// subsystem to be aware of schema itself. It's exported only for use in the -// configs/configupgrade package and should not be used from anywhere else. -// The caller may not modify any part of the returned schema data structure. -func ConnectionBlockSupersetSchema() *configschema.Block { - return connectionBlockSupersetSchema -} - -// EvalValidateResource is an EvalNode implementation that validates -// the configuration of a resource. -type EvalValidateResource struct { - Addr addrs.Resource - Provider *providers.Interface - ProviderSchema **ProviderSchema - Config *configs.Resource - ProviderMetas map[addrs.Provider]*configs.ProviderMeta - - // IgnoreWarnings means that warnings will not be passed through. This allows - // "just-in-time" passes of validation to continue execution through warnings. 
- IgnoreWarnings bool
-
- // ConfigVal, if non-nil, will be updated with the value resulting from
- // evaluating the given configuration body. Since validation is performed
- // very early, this value is likely to contain lots of unknown values,
- // but its type will conform to the schema of the resource type associated
- // with the resource instance being validated.
- ConfigVal *cty.Value
-}
-
-func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) {
- if n.ProviderSchema == nil || *n.ProviderSchema == nil {
- return nil, fmt.Errorf("EvalValidateResource has nil schema for %s", n.Addr)
- }
-
- var diags tfdiags.Diagnostics
- provider := *n.Provider
- cfg := *n.Config
- schema := *n.ProviderSchema
- mode := cfg.Mode
-
- keyData := EvalDataForNoInstanceKey
-
- switch {
- case n.Config.Count != nil:
- // If the config block has count, we'll evaluate with an unknown
- // number as count.index so we can still type check even though
- // we won't expand count until the plan phase.
- keyData = InstanceKeyEvalData{
- CountIndex: cty.UnknownVal(cty.Number),
- }
-
- // Basic type-checking of the count argument. More complete validation
- // of this will happen when we DynamicExpand during the plan walk.
- countDiags := n.validateCount(ctx, n.Config.Count)
- diags = diags.Append(countDiags)
-
- case n.Config.ForEach != nil:
- keyData = InstanceKeyEvalData{
- EachKey: cty.UnknownVal(cty.String),
- EachValue: cty.UnknownVal(cty.DynamicPseudoType),
- }
-
- // Evaluate the for_each expression here so we can expose the diagnostics
- forEachDiags := n.validateForEach(ctx, n.Config.ForEach)
- diags = diags.Append(forEachDiags)
- }
-
- for _, traversal := range n.Config.DependsOn {
- ref, refDiags := addrs.ParseRef(traversal)
- diags = diags.Append(refDiags)
- if !refDiags.HasErrors() && len(ref.Remaining) != 0 {
- diags = diags.Append(&hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid depends_on reference",
- Detail: "References in depends_on must be to a whole object (resource, etc), not to an attribute of an object.",
- Subject: ref.Remaining.SourceRange().Ptr(),
- })
- }
-
- // The ref must also refer to something that exists. To test that,
- // we'll just eval it and count on the fact that our evaluator will
- // detect references to non-existent objects.
- if !diags.HasErrors() {
- scope := ctx.EvaluationScope(nil, EvalDataForNoInstanceKey)
- if scope != nil { // sometimes nil in tests, due to incomplete mocks
- _, refDiags = scope.EvalReference(ref, cty.DynamicPseudoType)
- diags = diags.Append(refDiags)
- }
- }
- }
-
- // Validate the provider_meta block for the provider this resource
- // belongs to, if there is one.
- //
- // Note: this will return an error for every resource a provider
- // uses in a module, if the provider_meta for that module is
- // incorrect. The only way to solve this that we've found is to
- // insert a new ProviderMeta graph node in the graph, and make all
- // that provider's resources in the module depend on the node. That's
- // an awful heavy hammer to swing for this feature, which should be
- // used only in limited cases with heavy coordination with the
- // Terraform team, so we're going to defer that solution for a future
- // enhancement to this functionality.
- /* - if n.ProviderMetas != nil { - if m, ok := n.ProviderMetas[n.ProviderAddr.ProviderConfig.Type]; ok && m != nil { - // if the provider doesn't support this feature, throw an error - if (*n.ProviderSchema).ProviderMeta == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", cfg.ProviderConfigAddr()), - Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr), - Subject: &m.ProviderRange, - }) - } else { - _, _, metaDiags := ctx.EvaluateBlock(m.Config, (*n.ProviderSchema).ProviderMeta, nil, EvalDataForNoInstanceKey) - diags = diags.Append(metaDiags) - } - } - } - */ - // BUG(paddy): we're not validating provider_meta blocks on EvalValidate right now - // because the ProviderAddr for the resource isn't available on the EvalValidate - // struct. - - // Provider entry point varies depending on resource mode, because - // managed resources and data resources are two distinct concepts - // in the provider abstraction. - switch mode { - case addrs.ManagedResourceMode: - schema, _ := schema.SchemaForResourceType(mode, cfg.Type) - if schema == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource type", - Detail: fmt.Sprintf("The provider %s does not support resource type %q.", cfg.ProviderConfigAddr(), cfg.Type), - Subject: &cfg.TypeRange, - }) - return nil, diags.Err() - } - - configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - return nil, diags.Err() - } - - if cfg.Managed != nil { // can be nil only in tests with poorly-configured mocks - for _, traversal := range cfg.Managed.IgnoreChanges { - moreDiags := schema.StaticValidateTraversal(traversal) - diags = diags.Append(moreDiags) - } - } - - req := providers.ValidateResourceTypeConfigRequest{ - TypeName: cfg.Type, - Config: configVal, - } - - resp := provider.ValidateResourceTypeConfig(req) - diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) - - if n.ConfigVal != nil { - *n.ConfigVal = configVal - } - - case addrs.DataResourceMode: - schema, _ := schema.SchemaForResourceType(mode, cfg.Type) - if schema == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data source", - Detail: fmt.Sprintf("The provider %s does not support data source %q.", cfg.ProviderConfigAddr(), cfg.Type), - Subject: &cfg.TypeRange, - }) - return nil, diags.Err() - } - - configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - return nil, diags.Err() - } - - req := providers.ValidateDataSourceConfigRequest{ - TypeName: cfg.Type, - Config: configVal, - } - - resp := provider.ValidateDataSourceConfig(req) - diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) - } - - if n.IgnoreWarnings { - // If we _only_ have warnings then we'll return nil. - if diags.HasErrors() { - return nil, diags.NonFatalErr() - } - return nil, nil - } else { - // We'll return an error if there are any diagnostics at all, even if - // some of them are warnings. 
- return nil, diags.NonFatalErr()
- }
-}
-
-func (n *EvalValidateResource) validateCount(ctx EvalContext, expr hcl.Expression) tfdiags.Diagnostics {
- if expr == nil {
- return nil
- }
-
- var diags tfdiags.Diagnostics
-
- countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil)
- diags = diags.Append(countDiags)
- if diags.HasErrors() {
- return diags
- }
-
- if countVal.IsNull() {
- diags = diags.Append(&hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid count argument",
- Detail: `The given "count" argument value is null. An integer is required.`,
- Subject: expr.Range().Ptr(),
- })
- return diags
- }
-
- var err error
- countVal, err = convert.Convert(countVal, cty.Number)
- if err != nil {
- diags = diags.Append(&hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid count argument",
- Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err),
- Subject: expr.Range().Ptr(),
- })
- return diags
- }
-
- // If the value isn't known then that's the best we can do for now, but
- // we'll check more thoroughly during the plan walk.
- if !countVal.IsKnown() {
- return diags
- }
-
- // If we _do_ know the value, then we can do a few more checks here.
- var count int
- err = gocty.FromCtyValue(countVal, &count)
- if err != nil {
- // Isn't a whole number, etc.
- diags = diags.Append(&hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid count argument",
- Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err),
- Subject: expr.Range().Ptr(),
- })
- return diags
- }
-
- if count < 0 {
- diags = diags.Append(&hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid count argument",
- Detail: `The given "count" argument value is unsuitable: count cannot be negative.`,
- Subject: expr.Range().Ptr(),
- })
- return diags
- }
-
- return diags
-}
-
-func (n *EvalValidateResource) validateForEach(ctx EvalContext, expr hcl.Expression) (diags tfdiags.Diagnostics) {
- val, forEachDiags := evaluateForEachExpressionValue(expr, ctx)
- // If the value isn't known then that's the best we can do for now, but
- // we'll check more thoroughly during the plan walk
- if !val.IsKnown() {
- return diags
- }
-
- if forEachDiags.HasErrors() {
- diags = diags.Append(forEachDiags)
- }
-
- return diags
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
deleted file mode 100644
index dd5e4018..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package terraform
-
-import (
- "fmt"
-
- "github.com/hashicorp/hcl/v2"
-
- "github.com/hashicorp/terraform/addrs"
- "github.com/hashicorp/terraform/configs/configschema"
- "github.com/hashicorp/terraform/lang"
- "github.com/hashicorp/terraform/tfdiags"
-)
-
-// EvalValidateSelfRef is an EvalNode implementation that checks to ensure that
-// expressions within a particular referenceable block do not reference that
-// same block.
-type EvalValidateSelfRef struct {
- Addr addrs.Referenceable
- Config hcl.Body
- ProviderSchema **ProviderSchema
-}
-
-func (n *EvalValidateSelfRef) Eval(ctx EvalContext) (interface{}, error) {
- var diags tfdiags.Diagnostics
- addr := n.Addr
-
- addrStrs := make([]string, 0, 1)
- addrStrs = append(addrStrs, addr.String())
- switch tAddr := addr.(type) {
- case addrs.ResourceInstance:
- // A resource instance may not refer to its containing resource either.
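/*
A small sketch (hypothetical address) of what addrStrs holds after the
append below: for the instance aws_instance.foo[0] it becomes

    []string{"aws_instance.foo[0]", "aws_instance.foo"}

so a reference to either the instance or its containing resource is
reported as self-referential.
*/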
- addrStrs = append(addrStrs, tAddr.ContainingResource().String()) - } - - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - return nil, fmt.Errorf("provider schema unavailable while validating %s for self-references; this is a bug in Terraform and should be reported", addr) - } - - providerSchema := *n.ProviderSchema - var schema *configschema.Block - switch tAddr := addr.(type) { - case addrs.Resource: - schema, _ = providerSchema.SchemaForResourceAddr(tAddr) - case addrs.ResourceInstance: - schema, _ = providerSchema.SchemaForResourceAddr(tAddr.ContainingResource()) - } - - if schema == nil { - return nil, fmt.Errorf("no schema available for %s to validate for self-references; this is a bug in Terraform and should be reported", addr) - } - - refs, _ := lang.ReferencesInBlock(n.Config, schema) - for _, ref := range refs { - for _, addrStr := range addrStrs { - if ref.Subject.String() == addrStr { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Self-referential block", - Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addrStr), - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - } - } - } - - return nil, diags.NonFatalErr() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go deleted file mode 100644 index 5fc90245..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go +++ /dev/null @@ -1,245 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "reflect" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/instances" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// EvalSetModuleCallArguments is an EvalNode implementation that sets values -// for arguments of a child module call, for later retrieval during -// expression evaluation. -type EvalSetModuleCallArguments struct { - Module addrs.ModuleCallInstance - Values map[string]cty.Value -} - -// TODO: test -func (n *EvalSetModuleCallArguments) Eval(ctx EvalContext) (interface{}, error) { - ctx.SetModuleCallArguments(n.Module, n.Values) - return nil, nil -} - -// EvalModuleCallArgument is an EvalNode implementation that produces the value -// for a particular variable as will be used by a child module instance. -// -// The result is written into the map given in Values, with its key -// set to the local name of the variable, disregarding the module instance -// address. Any existing values in that map are deleted first. This weird -// interface is a result of trying to be convenient for use with -// EvalContext.SetModuleCallArguments, which expects a map to merge in with -// any existing arguments. -type EvalModuleCallArgument struct { - Addr addrs.InputVariable - Config *configs.Variable - Expr hcl.Expression - ModuleInstance addrs.ModuleInstance - - Values map[string]cty.Value - - // validateOnly indicates that this evaluation is only for config - // validation, and we will not have any expansion module instance - // repetition data. 
- validateOnly bool -} - -func (n *EvalModuleCallArgument) Eval(ctx EvalContext) (interface{}, error) { - // Clear out the existing mapping - for k := range n.Values { - delete(n.Values, k) - } - - wantType := n.Config.Type - name := n.Addr.Name - expr := n.Expr - - if expr == nil { - // Should never happen, but we'll bail out early here rather than - // crash in case it does. We set no value at all in this case, - // making a subsequent call to EvalContext.SetModuleCallArguments - // a no-op. - log.Printf("[ERROR] attempt to evaluate %s with nil expression", n.Addr.String()) - return nil, nil - } - - var moduleInstanceRepetitionData instances.RepetitionData - - switch { - case n.validateOnly: - // the instance expander does not track unknown expansion values, so we - // have to assume all RepetitionData is unknown. - moduleInstanceRepetitionData = instances.RepetitionData{ - CountIndex: cty.UnknownVal(cty.Number), - EachKey: cty.UnknownVal(cty.String), - EachValue: cty.DynamicVal, - } - - default: - // Get the repetition data for this module instance, - // so we can create the appropriate scope for evaluating our expression - moduleInstanceRepetitionData = ctx.InstanceExpander().GetModuleInstanceRepetitionData(n.ModuleInstance) - } - - scope := ctx.EvaluationScope(nil, moduleInstanceRepetitionData) - val, diags := scope.EvalExpr(expr, cty.DynamicPseudoType) - - // We intentionally passed DynamicPseudoType to EvalExpr above because - // now we can do our own local type conversion and produce an error message - // with better context if it fails. - var convErr error - val, convErr = convert.Convert(val, wantType) - if convErr != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid value for module argument", - Detail: fmt.Sprintf( - "The given value is not suitable for child module variable %q defined at %s: %s.", - name, n.Config.DeclRange.String(), convErr, - ), - Subject: expr.Range().Ptr(), - }) - // We'll return a placeholder unknown value to avoid producing - // redundant downstream errors. - val = cty.UnknownVal(wantType) - } - - n.Values[name] = val - return nil, diags.ErrWithWarnings() -} - -// evalVariableValidations is an EvalNode implementation that ensures that -// all of the configured custom validations for a variable are passing. -// -// This must be used only after any side-effects that make the value of the -// variable available for use in expression evaluation, such as -// EvalModuleCallArgument for variables in descendent modules. -type evalVariableValidations struct { - Addr addrs.AbsInputVariableInstance - Config *configs.Variable - - // Expr is the expression that provided the value for the variable, if any. - // This will be nil for root module variables, because their values come - // from outside the configuration. - Expr hcl.Expression -} - -func (n *evalVariableValidations) Eval(ctx EvalContext) (interface{}, error) { - if n.Config == nil || len(n.Config.Validations) == 0 { - log.Printf("[TRACE] evalVariableValidations: not active for %s, so skipping", n.Addr) - return nil, nil - } - - var diags tfdiags.Diagnostics - - // Variable nodes evaluate in the parent module to where they were declared - // because the value expression (n.Expr, if set) comes from the calling - // "module" block in the parent module. 
- // - // Validation expressions are statically validated (during configuration - // loading) to refer only to the variable being validated, so we can - // bypass our usual evaluation machinery here and just produce a minimal - // evaluation context containing just the required value, and thus avoid - // the problem that ctx's evaluation functions refer to the wrong module. - val := ctx.GetVariableValue(n.Addr) - hclCtx := &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "var": cty.ObjectVal(map[string]cty.Value{ - n.Config.Name: val, - }), - }, - Functions: ctx.EvaluationScope(nil, EvalDataForNoInstanceKey).Functions(), - } - - for _, validation := range n.Config.Validations { - const errInvalidCondition = "Invalid variable validation result" - const errInvalidValue = "Invalid value for variable" - - result, moreDiags := validation.Condition.Value(hclCtx) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - log.Printf("[TRACE] evalVariableValidations: %s rule %s condition expression failed: %s", n.Addr, validation.DeclRange, diags.Err().Error()) - } - if !result.IsKnown() { - log.Printf("[TRACE] evalVariableValidations: %s rule %s condition value is unknown, so skipping validation for now", n.Addr, validation.DeclRange) - continue // We'll wait until we've learned more, then. - } - if result.IsNull() { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errInvalidCondition, - Detail: "Validation condition expression must return either true or false, not null.", - Subject: validation.Condition.Range().Ptr(), - Expression: validation.Condition, - EvalContext: hclCtx, - }) - continue - } - var err error - result, err = convert.Convert(result, cty.Bool) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errInvalidCondition, - Detail: fmt.Sprintf("Invalid validation condition result value: %s.", tfdiags.FormatError(err)), - Subject: validation.Condition.Range().Ptr(), - Expression: validation.Condition, - EvalContext: hclCtx, - }) - continue - } - - if result.False() { - if n.Expr != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errInvalidValue, - Detail: fmt.Sprintf("%s\n\nThis was checked by the validation rule at %s.", validation.ErrorMessage, validation.DeclRange.String()), - Subject: n.Expr.Range().Ptr(), - }) - } else { - // Since we don't have a source expression for a root module - // variable, we'll just report the error from the perspective - // of the variable declaration itself. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errInvalidValue, - Detail: fmt.Sprintf("%s\n\nThis was checked by the validation rule at %s.", validation.ErrorMessage, validation.DeclRange.String()), - Subject: n.Config.DeclRange.Ptr(), - }) - } - } - } - - return nil, diags.ErrWithWarnings() -} - -// hclTypeName returns the name of the type that would represent this value in -// a config file, or falls back to the Go type name if there's no corresponding -// HCL type. This is used for formatted output, not for comparing types. 
-func hclTypeName(i interface{}) string { - switch k := reflect.Indirect(reflect.ValueOf(i)).Kind(); k { - case reflect.Bool: - return "boolean" - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: - return "number" - case reflect.Array, reflect.Slice: - return "list" - case reflect.Map: - return "map" - case reflect.String: - return "string" - default: - // fall back to the Go type if there's no match - return k.String() - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go deleted file mode 100644 index d4aa94d3..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go +++ /dev/null @@ -1,86 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/providers" -) - -// ProviderEvalTree returns the evaluation tree for initializing and -// configuring providers. -func ProviderEvalTree(n *NodeApplyableProvider, config *configs.Provider) EvalNode { - var provider providers.Interface - - addr := n.Addr - - seq := make([]EvalNode, 0, 5) - seq = append(seq, &EvalInitProvider{ - Addr: addr, - }) - - // Input stuff - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkImport}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: addr, - Output: &provider, - }, - }, - }, - }) - - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkValidate}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: addr, - Output: &provider, - }, - &EvalValidateProvider{ - Addr: addr, - Provider: &provider, - Config: config, - }, - }, - }, - }) - - // Apply stuff - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: addr, - Output: &provider, - }, - }, - }, - }) - - // We configure on everything but validate, since validate may - // not have access to all the variables. - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalConfigProvider{ - Addr: addr, - Provider: &provider, - Config: config, - }, - }, - }, - }) - - return &EvalSequence{Nodes: seq} -} - -// CloseProviderEvalTree returns the evaluation tree for closing -// provider connections that aren't needed anymore. 
-func CloseProviderEvalTree(addr addrs.AbsProviderConfig) EvalNode { - return &EvalCloseProvider{Addr: addr} -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaluate.go b/vendor/github.com/hashicorp/terraform/terraform/evaluate.go deleted file mode 100644 index 128dd7a4..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/evaluate.go +++ /dev/null @@ -1,867 +0,0 @@ -package terraform - -import ( - "fmt" - "os" - "path/filepath" - "sync" - - "github.com/agext/levenshtein" - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/instances" - "github.com/hashicorp/terraform/lang" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// Evaluator provides the necessary contextual data for evaluating expressions -// for a particular walk operation. -type Evaluator struct { - // Operation defines what type of operation this evaluator is being used - // for. - Operation walkOperation - - // Meta is contextual metadata about the current operation. - Meta *ContextMeta - - // Config is the root node in the configuration tree. - Config *configs.Config - - // VariableValues is a map from variable names to their associated values, - // within the module indicated by ModulePath. VariableValues is modified - // concurrently, and so it must be accessed only while holding - // VariableValuesLock. - // - // The first map level is string representations of addr.ModuleInstance - // values, while the second level is variable names. - VariableValues map[string]map[string]cty.Value - VariableValuesLock *sync.Mutex - - // Schemas is a repository of all of the schemas we should need to - // evaluate expressions. This must be constructed by the caller to - // include schemas for all of the providers, resource types, data sources - // and provisioners used by the given configuration and state. - // - // This must not be mutated during evaluation. - Schemas *Schemas - - // State is the current state, embedded in a wrapper that ensures that - // it can be safely accessed and modified concurrently. - State *states.SyncState - - // Changes is the set of proposed changes, embedded in a wrapper that - // ensures they can be safely accessed and modified concurrently. - Changes *plans.ChangesSync -} - -// Scope creates an evaluation scope for the given module path and optional -// resource. -// -// If the "self" argument is nil then the "self" object is not available -// in evaluated expressions. Otherwise, it behaves as an alias for the given -// address. -func (e *Evaluator) Scope(data lang.Data, self addrs.Referenceable) *lang.Scope { - return &lang.Scope{ - Data: data, - SelfAddr: self, - PureOnly: e.Operation != walkApply && e.Operation != walkDestroy, - BaseDir: ".", // Always current working directory for now. - } -} - -// evaluationStateData is an implementation of lang.Data that resolves -// references primarily (but not exclusively) using information from a State. -type evaluationStateData struct { - Evaluator *Evaluator - - // ModulePath is the path through the dynamic module tree to the module - // that references will be resolved relative to. 
- ModulePath addrs.ModuleInstance - - // InstanceKeyData describes the values, if any, that are accessible due - // to repetition of a containing object using "count" or "for_each" - // arguments. (It is _not_ used for the for_each inside "dynamic" blocks, - // since the user specifies in that case which variable name to locally - // shadow.) - InstanceKeyData InstanceKeyEvalData - - // Operation records the type of walk the evaluationStateData is being used - // for. - Operation walkOperation -} - -// InstanceKeyEvalData is the old name for instances.RepetitionData, aliased -// here for compatibility. In new code, use instances.RepetitionData instead. -type InstanceKeyEvalData = instances.RepetitionData - -// EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for -// evaluating in a context that has the given instance key. -// -// The forEachMap argument can be nil when preparing for evaluation -// in a context where each.value is prohibited, such as a destroy-time -// provisioner. In that case, the returned EachValue will always be -// cty.NilVal. -func EvalDataForInstanceKey(key addrs.InstanceKey, forEachMap map[string]cty.Value) InstanceKeyEvalData { - var evalData InstanceKeyEvalData - if key == nil { - return evalData - } - - keyValue := key.Value() - switch keyValue.Type() { - case cty.String: - evalData.EachKey = keyValue - evalData.EachValue = forEachMap[keyValue.AsString()] - case cty.Number: - evalData.CountIndex = keyValue - } - return evalData -} - -// EvalDataForNoInstanceKey is a value of InstanceKeyData that sets no instance -// key values at all, suitable for use in contexts where no keyed instance -// is relevant. -var EvalDataForNoInstanceKey = InstanceKeyEvalData{} - -// evaluationStateData must implement lang.Data -var _ lang.Data = (*evaluationStateData)(nil) - -func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - switch addr.Name { - - case "index": - idxVal := d.InstanceKeyData.CountIndex - if idxVal == cty.NilVal { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to "count" in non-counted context`, - Detail: fmt.Sprintf(`The "count" object can only be used in "module", "resource", and "data" blocks, and only when the "count" argument is set.`), - Subject: rng.ToHCL().Ptr(), - }) - return cty.UnknownVal(cty.Number), diags - } - return idxVal, diags - - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "count" attribute`, - Detail: fmt.Sprintf(`The "count" object does not have an attribute named %q. 
The only supported attribute is count.index, which is the index of each instance of a resource block that has the "count" argument set.`, addr.Name),
- Subject: rng.ToHCL().Ptr(),
- })
- return cty.DynamicVal, diags
- }
-}
-
-func (d *evaluationStateData) GetForEachAttr(addr addrs.ForEachAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
- var diags tfdiags.Diagnostics
- var returnVal cty.Value
- switch addr.Name {
-
- case "key":
- returnVal = d.InstanceKeyData.EachKey
- case "value":
- returnVal = d.InstanceKeyData.EachValue
-
- if returnVal == cty.NilVal {
- diags = diags.Append(&hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: `each.value cannot be used in this context`,
- Detail: fmt.Sprintf(`A reference to "each.value" has been used in a context in which it is unavailable, such as when the configuration no longer contains the value in its "for_each" expression. Remove this reference to each.value in your configuration to work around this error.`),
- Subject: rng.ToHCL().Ptr(),
- })
- return cty.UnknownVal(cty.DynamicPseudoType), diags
- }
- default:
- diags = diags.Append(&hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: `Invalid "each" attribute`,
- Detail: fmt.Sprintf(`The "each" object does not have an attribute named %q. The supported attributes are each.key and each.value, the current key and value pair of the "for_each" attribute set.`, addr.Name),
- Subject: rng.ToHCL().Ptr(),
- })
- return cty.DynamicVal, diags
- }
-
- if returnVal == cty.NilVal {
- diags = diags.Append(&hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: `Reference to "each" in context without for_each`,
- Detail: fmt.Sprintf(`The "each" object can be used only in "module" or "resource" blocks, and only when the "for_each" argument is set.`),
- Subject: rng.ToHCL().Ptr(),
- })
- return cty.UnknownVal(cty.DynamicPseudoType), diags
- }
- return returnVal, diags
-}
-
-func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
- var diags tfdiags.Diagnostics
-
- // First we'll make sure the requested value is declared in configuration,
- // so we can produce a nice message if not.
- moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath)
- if moduleConfig == nil {
- // should never happen, since we can't be evaluating in a module
- // that wasn't mentioned in configuration.
- panic(fmt.Sprintf("input variable read from %s, which has no configuration", d.ModulePath)) - } - - config := moduleConfig.Module.Variables[addr.Name] - if config == nil { - var suggestions []string - for k := range moduleConfig.Module.Variables { - suggestions = append(suggestions, k) - } - suggestion := nameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } else { - suggestion = fmt.Sprintf(" This variable can be declared with a variable %q {} block.", addr.Name) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared input variable`, - Detail: fmt.Sprintf(`An input variable with the name %q has not been declared.%s`, addr.Name, suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - wantType := cty.DynamicPseudoType - if config.Type != cty.NilType { - wantType = config.Type - } - - d.Evaluator.VariableValuesLock.Lock() - defer d.Evaluator.VariableValuesLock.Unlock() - - // During the validate walk, input variables are always unknown so - // that we are validating the configuration for all possible input values - // rather than for a specific set. Checking against a specific set of - // input values then happens during the plan walk. - // - // This is important because otherwise the validation walk will tend to be - // overly strict, requiring expressions throughout the configuration to - // be complicated to accommodate all possible inputs, whereas returning - // known here allows for simpler patterns like using input values as - // guards to broadly enable/disable resources, avoid processing things - // that are disabled, etc. Terraform's static validation leans towards - // being liberal in what it accepts because the subsequent plan walk has - // more information available and so can be more conservative. - if d.Operation == walkValidate { - return cty.UnknownVal(wantType), diags - } - - moduleAddrStr := d.ModulePath.String() - vals := d.Evaluator.VariableValues[moduleAddrStr] - if vals == nil { - return cty.UnknownVal(wantType), diags - } - - val, isSet := vals[addr.Name] - if !isSet { - if config.Default != cty.NilVal { - return config.Default, diags - } - return cty.UnknownVal(wantType), diags - } - - var err error - val, err = convert.Convert(val, wantType) - if err != nil { - // We should never get here because this problem should've been caught - // during earlier validation, but we'll do something reasonable anyway. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Incorrect variable type`, - Detail: fmt.Sprintf(`The resolved value of variable %q is not appropriate: %s.`, addr.Name, err), - Subject: &config.DeclRange, - }) - // Stub out our return value so that the semantic checker doesn't - // produce redundant downstream errors. - val = cty.UnknownVal(wantType) - } - - return val, diags -} - -func (d *evaluationStateData) GetLocalValue(addr addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // First we'll make sure the requested value is declared in configuration, - // so we can produce a nice message if not. - moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. 
- panic(fmt.Sprintf("local value read from %s, which has no configuration", d.ModulePath)) - } - - config := moduleConfig.Module.Locals[addr.Name] - if config == nil { - var suggestions []string - for k := range moduleConfig.Module.Locals { - suggestions = append(suggestions, k) - } - suggestion := nameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared local value`, - Detail: fmt.Sprintf(`A local value with the name %q has not been declared.%s`, addr.Name, suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - val := d.Evaluator.State.LocalValue(addr.Absolute(d.ModulePath)) - if val == cty.NilVal { - // Not evaluated yet? - val = cty.DynamicVal - } - - return val, diags -} - -func (d *evaluationStateData) GetModule(addr addrs.ModuleCall, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Output results live in the module that declares them, which is one of - // the child module instances of our current module path. - moduleAddr := d.ModulePath.Module().Child(addr.Name) - - parentCfg := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - callConfig, ok := parentCfg.Module.ModuleCalls[addr.Name] - if !ok { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared module`, - Detail: fmt.Sprintf(`The configuration contains no %s.`, moduleAddr), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - // We'll consult the configuration to see what output names we are - // expecting, so we can ensure the resulting object is of the expected - // type even if our data is incomplete for some reason. - moduleConfig := d.Evaluator.Config.Descendent(moduleAddr) - if moduleConfig == nil { - // should never happen, since we have a valid module call above, this - // should be caught during static validation. - panic(fmt.Sprintf("output value read from %s, which has no configuration", moduleAddr)) - } - outputConfigs := moduleConfig.Module.Outputs - - // Collect all the relevant outputs that current exist in the state. - // We know the instance path up to this point, and the child module name, - // so we only need to store these by instance key. - stateMap := map[addrs.InstanceKey]map[string]cty.Value{} - for _, output := range d.Evaluator.State.ModuleOutputs(d.ModulePath, addr) { - _, callInstance := output.Addr.Module.CallInstance() - instance, ok := stateMap[callInstance.Key] - if !ok { - instance = map[string]cty.Value{} - stateMap[callInstance.Key] = instance - } - - instance[output.Addr.OutputValue.Name] = output.Value - } - - // Get all changes that reside for this module call within our path. - // The change contains the full addr, so we can key these with strings. - changesMap := map[addrs.InstanceKey]map[string]*plans.OutputChangeSrc{} - for _, change := range d.Evaluator.Changes.GetOutputChanges(d.ModulePath, addr) { - _, callInstance := change.Addr.Module.CallInstance() - instance, ok := changesMap[callInstance.Key] - if !ok { - instance = map[string]*plans.OutputChangeSrc{} - changesMap[callInstance.Key] = instance - } - - instance[change.Addr.OutputValue.Name] = change - } - - // Build up all the module objects, creating a map of values for each - // module instance. 
- moduleInstances := map[addrs.InstanceKey]map[string]cty.Value{} - - // create a dummy object type for validation below - unknownMap := map[string]cty.Type{} - - // the structure is based on the configuration, so iterate through all the - // defined outputs, and add any instance state or changes we find. - for _, cfg := range outputConfigs { - // record the output names for validation - unknownMap[cfg.Name] = cty.DynamicPseudoType - - // get all instance output for this path from the state - for key, states := range stateMap { - outputState, ok := states[cfg.Name] - if !ok { - continue - } - - instance, ok := moduleInstances[key] - if !ok { - instance = map[string]cty.Value{} - moduleInstances[key] = instance - } - - instance[cfg.Name] = outputState - } - - // any pending changes override the state values - for key, changes := range changesMap { - changeSrc, ok := changes[cfg.Name] - if !ok { - continue - } - - instance, ok := moduleInstances[key] - if !ok { - instance = map[string]cty.Value{} - moduleInstances[key] = instance - } - - change, err := changeSrc.Decode() - if err != nil { - // This should happen only if someone has tampered with a plan - // file, so we won't bother with a pretty error for it. - diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", addr, err)) - instance[cfg.Name] = cty.DynamicVal - continue - } - - instance[cfg.Name] = change.After - } - } - - var ret cty.Value - - // compile the outputs into the correct value type for the each mode - switch { - case callConfig.Count != nil: - // figure out what the last index we have is - length := -1 - for key := range moduleInstances { - intKey, ok := key.(addrs.IntKey) - if !ok { - // old key from state which is being dropped - continue - } - if int(intKey) >= length { - length = int(intKey) + 1 - } - } - - if length > 0 { - vals := make([]cty.Value, length) - for key, instance := range moduleInstances { - intKey, ok := key.(addrs.IntKey) - if !ok { - // old key from state which is being dropped - continue - } - - vals[int(intKey)] = cty.ObjectVal(instance) - } - - // Insert unknown values where there are any missing instances - for i, v := range vals { - if v.IsNull() { - vals[i] = cty.DynamicVal - continue - } - } - ret = cty.TupleVal(vals) - } else { - ret = cty.EmptyTupleVal - } - - case callConfig.ForEach != nil: - vals := make(map[string]cty.Value) - for key, instance := range moduleInstances { - strKey, ok := key.(addrs.StringKey) - if !ok { - continue - } - - vals[string(strKey)] = cty.ObjectVal(instance) - } - - if len(vals) > 0 { - ret = cty.ObjectVal(vals) - } else { - ret = cty.EmptyObjectVal - } - - default: - val, ok := moduleInstances[addrs.NoKey] - if !ok { - // create the object if there wasn't one known - val = map[string]cty.Value{} - for k := range outputConfigs { - val[k] = cty.DynamicVal - } - } - - ret = cty.ObjectVal(val) - } - - // The module won't be expanded during validation, so we need to return an - // unknown value. This will ensure the types look correct, since we built - // the objects based on the configuration. - if d.Operation == walkValidate { - // While we know the type here and it would be nice to validate whether - // indexes are valid or not, because tuples and objects have fixed - // numbers of elements we can't simply return an unknown value of the - // same type since we have not expanded any instances during - // validation.
- // - // In order to validate the expression a little more precisely, we'll create - // an unknown map or list here to get more type information. - ty := cty.Object(unknownMap) - switch { - case callConfig.Count != nil: - ret = cty.UnknownVal(cty.List(ty)) - case callConfig.ForEach != nil: - ret = cty.UnknownVal(cty.Map(ty)) - default: - ret = cty.UnknownVal(ty) - } - } - - return ret, diags -} - -func (d *evaluationStateData) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - switch addr.Name { - - case "cwd": - wd, err := os.Getwd() - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Failed to get working directory`, - Detail: fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - return cty.StringVal(filepath.ToSlash(wd)), diags - - case "module": - moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. - panic(fmt.Sprintf("module.path read from module %s, which has no configuration", d.ModulePath)) - } - sourceDir := moduleConfig.Module.SourceDir - return cty.StringVal(filepath.ToSlash(sourceDir)), diags - - case "root": - sourceDir := d.Evaluator.Config.Module.SourceDir - return cty.StringVal(filepath.ToSlash(sourceDir)), diags - - default: - suggestion := nameSuggestion(addr.Name, []string{"cwd", "module", "root"}) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "path" attribute`, - Detail: fmt.Sprintf(`The "path" object does not have an attribute named %q.%s`, addr.Name, suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } -} - -func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - // First we'll consult the configuration to see if a resource of this - // name is declared at all. - moduleAddr := d.ModulePath - moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. - panic(fmt.Sprintf("resource value read from %s, which has no configuration", moduleAddr)) - } - - config := moduleConfig.Module.ResourceByAddr(addr) - if config == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared resource`, - Detail: fmt.Sprintf(`A resource %q %q has not been declared in %s`, addr.Type, addr.Name, moduleDisplayAddr(moduleAddr)), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - rs := d.Evaluator.State.Resource(addr.Absolute(d.ModulePath)) - - if rs == nil { - // we must return DynamicVal so that both interpretations - // can proceed without generating errors, and we'll deal with this - // in a later step where more information is gathered. - // (In practice we should only end up here during the validate walk, - // since later walks should have at least partial states populated - // for all resources in the configuration.)
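(The count-mode assembly in GetModule above, and again in GetResource below, follows one pattern: find the highest integer instance key, lay instances out positionally, and fill gaps with placeholders. A self-contained sketch of that pattern; buildCountTuple is an illustrative name, not a function from the vendored code, and the placeholder stands in for the unknown values used above.)

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// buildCountTuple lays count-keyed instances out positionally and fills
// any missing indices with a placeholder, mirroring the logic above.
func buildCountTuple(instances map[int]cty.Value, placeholder cty.Value) cty.Value {
	// Find the length implied by the highest index present.
	length := -1
	for i := range instances {
		if i >= length {
			length = i + 1
		}
	}
	if length <= 0 {
		return cty.EmptyTupleVal
	}

	vals := make([]cty.Value, length)
	for i, v := range instances {
		vals[i] = v
	}
	// Any index with no instance becomes the placeholder value.
	for i, v := range vals {
		if v == cty.NilVal {
			vals[i] = placeholder
		}
	}
	return cty.TupleVal(vals)
}

func main() {
	got := buildCountTuple(map[int]cty.Value{
		0: cty.StringVal("a"),
		2: cty.StringVal("c"), // index 1 is missing and becomes the placeholder
	}, cty.UnknownVal(cty.String))
	fmt.Printf("%#v\n", got)
}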
- return cty.DynamicVal, diags - } - - providerAddr := rs.ProviderConfig - - schema := d.getResourceSchema(addr, providerAddr) - if schema == nil { - // This shouldn't happen, since validation before we get here should've - // taken care of it, but we'll show a reasonable error message anyway. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Missing resource type schema`, - Detail: fmt.Sprintf("No schema is available for %s in %s. This is a bug in Terraform and should be reported.", addr, providerAddr), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - ty := schema.ImpliedType() - - // Decode all instances in the current state - instances := map[addrs.InstanceKey]cty.Value{} - for key, is := range rs.Instances { - if is == nil || is.Current == nil { - // Assume we're dealing with an instance that hasn't been created yet. - instances[key] = cty.UnknownVal(ty) - continue - } - - instAddr := addr.Instance(key).Absolute(d.ModulePath) - - // Planned resources are temporarily stored in state with empty values, - // and need to be replaced by the planned value here. - if is.Current.Status == states.ObjectPlanned { - change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen) - if change == nil { - // If the object is in planned status then we should not get - // here, since we should have found a pending value in the plan - // above instead. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing pending object in plan", - Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr), - Subject: &config.DeclRange, - }) - continue - } - val, err := change.After.Decode(ty) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in plan", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - - instances[key] = val - continue - } - - ios, err := is.Current.Decode(ty) - if err != nil { - // This shouldn't happen, since by the time we get here we - // should have upgraded the state data already.
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in state", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - instances[key] = ios.Value - } - - var ret cty.Value - - switch { - case config.Count != nil: - // figure out what the last index we have is - length := -1 - for key := range instances { - intKey, ok := key.(addrs.IntKey) - if !ok { - continue - } - if int(intKey) >= length { - length = int(intKey) + 1 - } - } - - if length > 0 { - vals := make([]cty.Value, length) - for key, instance := range instances { - intKey, ok := key.(addrs.IntKey) - if !ok { - // old key from state, which isn't valid for evaluation - continue - } - - vals[int(intKey)] = instance - } - - // Insert unknown values where there are any missing instances - for i, v := range vals { - if v == cty.NilVal { - vals[i] = cty.UnknownVal(ty) - } - } - ret = cty.TupleVal(vals) - } else { - ret = cty.EmptyTupleVal - } - - case config.ForEach != nil: - vals := make(map[string]cty.Value) - for key, instance := range instances { - strKey, ok := key.(addrs.StringKey) - if !ok { - // old key that is being dropped and not used for evaluation - continue - } - vals[string(strKey)] = instance - } - - if len(vals) > 0 { - // We use an object rather than a map here because resource schemas - // may include dynamically-typed attributes, which will then cause - // each instance to potentially have a different runtime type even - // though they all conform to the static schema. - ret = cty.ObjectVal(vals) - } else { - ret = cty.EmptyObjectVal - } - - default: - val, ok := instances[addrs.NoKey] - if !ok { - // if the instance is missing, insert an unknown value - val = cty.UnknownVal(ty) - } - - ret = val - } - - // since the plan was not yet created during validate, the values we - // collected here may not correspond with configuration, so they must be - // unknown. - if d.Operation == walkValidate { - // While we know the type here and it would be nice to validate whether - // indexes are valid or not, because tuples and objects have fixed - // numbers of elements we can't simply return an unknown value of the - // same type since we have not expanded any instances during - // validation. - // - // In order to validate the expression a little precisely, we'll create - // an unknown map or list here to get more type information. - switch { - case config.Count != nil: - ret = cty.UnknownVal(cty.List(ty)) - case config.ForEach != nil: - ret = cty.UnknownVal(cty.Map(ty)) - default: - ret = cty.UnknownVal(ty) - } - } - - return ret, diags -} - -func (d *evaluationStateData) getResourceSchema(addr addrs.Resource, providerAddr addrs.AbsProviderConfig) *configschema.Block { - schemas := d.Evaluator.Schemas - schema, _ := schemas.ResourceTypeConfig(providerAddr.Provider, addr.Mode, addr.Type) - return schema -} - -func (d *evaluationStateData) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - switch addr.Name { - - case "workspace": - workspaceName := d.Evaluator.Meta.Env - return cty.StringVal(workspaceName), diags - - case "env": - // Prior to Terraform 0.12 there was an attribute "env", which was - // an alias name for "workspace". This was deprecated and is now - // removed. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "terraform" attribute`, - Detail: `The terraform.env attribute was deprecated in v0.10 and removed in v0.12. The "state environment" concept was rename to "workspace" in v0.12, and so the workspace name can now be accessed using the terraform.workspace attribute.`, - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "terraform" attribute`, - Detail: fmt.Sprintf(`The "terraform" object does not have an attribute named %q. The only supported attribute is terraform.workspace, the name of the currently-selected workspace.`, addr.Name), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } -} - -// nameSuggestion tries to find a name from the given slice of suggested names -// that is close to the given name and returns it if found. If no suggestion -// is close enough, returns the empty string. -// -// The suggestions are tried in order, so earlier suggestions take precedence -// if the given string is similar to two or more suggestions. -// -// This function is intended to be used with a relatively-small number of -// suggestions. It's not optimized for hundreds or thousands of them. -func nameSuggestion(given string, suggestions []string) string { - for _, suggestion := range suggestions { - dist := levenshtein.Distance(given, suggestion, nil) - if dist < 3 { // threshold determined experimentally - return suggestion - } - } - return "" -} - -// moduleDisplayAddr returns a string describing the given module instance -// address that is appropriate for returning to users in situations where the -// root module is possible. Specifically, it returns "the root module" if the -// root module instance is given, or a string representation of the module -// address otherwise. -func moduleDisplayAddr(addr addrs.ModuleInstance) string { - switch { - case addr.IsRoot(): - return "the root module" - default: - return addr.String() - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go b/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go deleted file mode 100644 index 86475a60..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go +++ /dev/null @@ -1,296 +0,0 @@ -package terraform - -import ( - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/helper/didyoumean" - "github.com/hashicorp/terraform/tfdiags" -) - -// StaticValidateReferences checks the given references against schemas and -// other statically-checkable rules, producing error diagnostics if any -// problems are found. -// -// If this method returns errors for a particular reference then evaluating -// that reference is likely to generate a very similar error, so callers should -// not run this method and then also evaluate the source expression(s) and -// merge the two sets of diagnostics together, since this will result in -// confusing redundant errors. -// -// This method can find more errors than can be found by evaluating an -// expression with a partially-populated scope, since it checks the referenced -// names directly against the schema rather than relying on evaluation errors. -// -// The result may include warning diagnostics if, for example, deprecated -// features are referenced. 
-func (d *evaluationStateData) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - for _, ref := range refs { - moreDiags := d.staticValidateReference(ref, self) - diags = diags.Append(moreDiags) - } - return diags -} - -func (d *evaluationStateData) staticValidateReference(ref *addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { - modCfg := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if modCfg == nil { - // This is a bug in the caller rather than a problem with the - // reference, but rather than crashing out here in an unhelpful way - // we'll just ignore it and trust a different layer to catch it. - return nil - } - - if ref.Subject == addrs.Self { - // The "self" address is a special alias for the address given as - // our self parameter here, if present. - if self == nil { - var diags tfdiags.Diagnostics - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "self" reference`, - // This detail message mentions some current practice that - // this codepath doesn't really "know about". If the "self" - // object starts being supported in more contexts later then - // we'll need to adjust this message. - Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - return diags - } - - synthRef := *ref // shallow copy - synthRef.Subject = self - ref = &synthRef - } - - switch addr := ref.Subject.(type) { - - // For static validation we validate both resource and resource instance references the same way. - // We mostly disregard the index, though we do some simple validation of - // its _presence_ in staticValidateSingleResourceReference and - // staticValidateMultiResourceReference respectively. - case addrs.Resource: - var diags tfdiags.Diagnostics - diags = diags.Append(d.staticValidateSingleResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) - diags = diags.Append(d.staticValidateResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) - return diags - case addrs.ResourceInstance: - var diags tfdiags.Diagnostics - diags = diags.Append(d.staticValidateMultiResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) - diags = diags.Append(d.staticValidateResourceReference(modCfg, addr.ContainingResource(), ref.Remaining, ref.SourceRange)) - return diags - - // We also handle all module call references the same way, disregarding index. - case addrs.ModuleCall: - return d.staticValidateModuleCallReference(modCfg, addr, ref.Remaining, ref.SourceRange) - case addrs.ModuleCallInstance: - return d.staticValidateModuleCallReference(modCfg, addr.Call, ref.Remaining, ref.SourceRange) - case addrs.AbsModuleCallOutput: - // This one is a funny one because we will take the output name referenced - // and use it to fake up a "remaining" that would make sense for the - // module call itself, rather than for the specific output, and then - // we can just re-use our static module call validation logic. - remain := make(hcl.Traversal, len(ref.Remaining)+1) - copy(remain[1:], ref.Remaining) - remain[0] = hcl.TraverseAttr{ - Name: addr.Name, - - // Using the whole reference as the source range here doesn't exactly - // match how HCL would normally generate an attribute traversal, - // but is close enough for our purposes. 
- SrcRange: ref.SourceRange.ToHCL(), - } - return d.staticValidateModuleCallReference(modCfg, addr.Call.Call, remain, ref.SourceRange) - - default: - // Anything else we'll just permit through without any static validation - // and let it be caught during dynamic evaluation, in evaluate.go. - return nil - } -} - -func (d *evaluationStateData) staticValidateSingleResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - // If we have at least one step in "remain" and this resource has - // "count" set then we know for sure this is invalid because we have - // something like: - // aws_instance.foo.bar - // ...when we really need - // aws_instance.foo[count.index].bar - - // It is _not_ safe to do this check when remain is empty, because that - // would also match aws_instance.foo[count.index].bar due to `count.index` - // not being statically-resolvable as part of a reference, and match - // direct references to the whole aws_instance.foo tuple. - if len(remain) == 0 { - return nil - } - - var diags tfdiags.Diagnostics - - cfg := modCfg.Module.ResourceByAddr(addr) - if cfg == nil { - // We'll just bail out here and catch this in our subsequent call to - // staticValidateResourceReference, then. - return diags - } - - if cfg.Count != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Missing resource instance key`, - Detail: fmt.Sprintf("Because %s has \"count\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[count.index]", addr, addr), - Subject: rng.ToHCL().Ptr(), - }) - } - if cfg.ForEach != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Missing resource instance key`, - Detail: fmt.Sprintf("Because %s has \"for_each\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[each.key]", addr, addr), - Subject: rng.ToHCL().Ptr(), - }) - } - - return diags -} - -func (d *evaluationStateData) staticValidateMultiResourceReference(modCfg *configs.Config, addr addrs.ResourceInstance, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - cfg := modCfg.Module.ResourceByAddr(addr.ContainingResource()) - if cfg == nil { - // We'll just bail out here and catch this in our subsequent call to - // staticValidateResourceReference, then. - return diags - } - - if addr.Key == addrs.NoKey { - // This is a different path into staticValidateSingleResourceReference - return d.staticValidateSingleResourceReference(modCfg, addr.ContainingResource(), remain, rng) - } else { - if cfg.Count == nil && cfg.ForEach == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Unexpected resource instance key`, - Detail: fmt.Sprintf(`Because %s does not have "count" or "for_each" set, references to it must not include an index key.
Remove the bracketed index to refer to the single instance of this resource.`, addr.ContainingResource()), - Subject: rng.ToHCL().Ptr(), - }) - } - } - - return diags -} - -func (d *evaluationStateData) staticValidateResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - var modeAdjective string - switch addr.Mode { - case addrs.ManagedResourceMode: - modeAdjective = "managed" - case addrs.DataResourceMode: - modeAdjective = "data" - default: - // should never happen - modeAdjective = "" - } - - cfg := modCfg.Module.ResourceByAddr(addr) - if cfg == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared resource`, - Detail: fmt.Sprintf(`A %s resource %q %q has not been declared in %s.`, modeAdjective, addr.Type, addr.Name, moduleConfigDisplayAddr(modCfg.Path)), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - - providerFqn := modCfg.Module.ProviderForLocalConfig(cfg.ProviderConfigAddr()) - schema, _ := d.Evaluator.Schemas.ResourceTypeConfig(providerFqn, addr.Mode, addr.Type) - - if schema == nil { - // Prior validation should've taken care of a resource block with an - // unsupported type, so we should never get here but we'll handle it - // here anyway for robustness. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid resource type`, - Detail: fmt.Sprintf(`A %s resource type %q is not supported by provider %q.`, modeAdjective, addr.Type, providerFqn.String()), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - - // As a special case we'll detect attempts to access an attribute called - // "count" and produce a special error for it, since versions of Terraform - // prior to v0.12 offered this as a weird special case that we can no - // longer support. - if len(remain) > 0 { - if step, ok := remain[0].(hcl.TraverseAttr); ok && step.Name == "count" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid resource count attribute`, - Detail: fmt.Sprintf(`The special "count" attribute is no longer supported after Terraform v0.12. Instead, use length(%s) to count resource instances.`, addr), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - } - - // If we got this far then we'll try to validate the remaining traversal - // steps against our schema. - moreDiags := schema.StaticValidateTraversal(remain) - diags = diags.Append(moreDiags) - - return diags -} - -func (d *evaluationStateData) staticValidateModuleCallReference(modCfg *configs.Config, addr addrs.ModuleCall, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // For now, our focus here is just in testing that the referenced module - // call exists. All other validation is deferred until evaluation time. 
- _, exists := modCfg.Module.ModuleCalls[addr.Name] - if !exists { - var suggestions []string - for name := range modCfg.Module.ModuleCalls { - suggestions = append(suggestions, name) - } - sort.Strings(suggestions) - suggestion := didyoumean.NameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared module`, - Detail: fmt.Sprintf(`No module call named %q is declared in %s.%s`, addr.Name, moduleConfigDisplayAddr(modCfg.Path), suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - - return diags -} - -// moduleConfigDisplayAddr returns a string describing the given module -// address that is appropriate for returning to users in situations where the -// root module is possible. Specifically, it returns "the root module" if the -// root module instance is given, or a string representation of the module -// address otherwise. -func moduleConfigDisplayAddr(addr addrs.Module) string { - switch { - case addr.IsRoot(): - return "the root module" - default: - return addr.String() - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/features.go b/vendor/github.com/hashicorp/terraform/terraform/features.go deleted file mode 100644 index 97c77bdb..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/features.go +++ /dev/null @@ -1,7 +0,0 @@ -package terraform - -import "os" - -// This file holds feature flags for the next release - -var flagWarnOutputErrors = os.Getenv("TF_WARN_OUTPUT_ERRORS") != "" diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go deleted file mode 100644 index 4c9f2f0c..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph.go +++ /dev/null @@ -1,107 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/terraform/addrs" - - "github.com/hashicorp/terraform/dag" -) - -// Graph represents the graph that Terraform uses to represent resources -// and their dependencies. -type Graph struct { - // Graph is the actual DAG. This is embedded so you can call the DAG - // methods directly. - dag.AcyclicGraph - - // Path is the path in the module tree that this Graph represents. - Path addrs.ModuleInstance -} - -func (g *Graph) DirectedGraph() dag.Grapher { - return &g.AcyclicGraph } - -// Walk walks the graph with the given walker for callbacks. The graph -// will be walked with full parallelism, so the walker should expect -// to be called concurrently. -func (g *Graph) Walk(walker GraphWalker) tfdiags.Diagnostics { - return g.walk(walker) -} - -func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics { - // The callbacks for entering/exiting a graph - ctx := walker.EvalContext() - - // Walk the graph. - var walkFn dag.WalkFunc - walkFn = func(v dag.Vertex) (diags tfdiags.Diagnostics) { - log.Printf("[TRACE] vertex %q: starting visit (%T)", dag.VertexName(v), v) - - defer func() { - log.Printf("[TRACE] vertex %q: visit complete", dag.VertexName(v)) - }() - - walker.EnterVertex(v) - defer walker.ExitVertex(v, diags) - - // vertexCtx is the context that we use when evaluating. This - // is normally the context of our graph but can be overridden - // with a GraphNodeModuleInstance impl.
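(Graph.walk, continued below, recurses: when a vertex implements GraphNodeDynamicExpandable, the subgraph it produces is walked with the same walker before the outer walk continues. A toy sketch of that shape, sequential and without diagnostics or parallelism; every name here is illustrative, not the dag package's API.)

package main

import "fmt"

// vertex is a stand-in for dag.Vertex.
type vertex interface{ Name() string }

type leaf string

func (l leaf) Name() string { return string(l) }

// expandable mirrors GraphNodeDynamicExpandable: a vertex that can
// produce a subgraph to walk before the overall walk continues.
type expandable struct {
	name string
	sub  []vertex
}

func (e expandable) Name() string     { return e.name }
func (e expandable) Expand() []vertex { return e.sub }

// walk visits each vertex and recurses into any dynamically produced
// subgraph, echoing the recursion in Graph.walk.
func walk(vs []vertex) {
	for _, v := range vs {
		fmt.Println("visit", v.Name())
		if ev, ok := v.(interface{ Expand() []vertex }); ok {
			walk(ev.Expand())
		}
	}
}

func main() {
	walk([]vertex{
		leaf("a"),
		expandable{name: "b", sub: []vertex{leaf("b[0]"), leaf("b[1]")}},
	})
}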
- vertexCtx := ctx - if pn, ok := v.(GraphNodeModuleInstance); ok { - vertexCtx = walker.EnterPath(pn.Path()) - defer walker.ExitPath(pn.Path()) - } - - // If the node is eval-able, then evaluate it. - if ev, ok := v.(GraphNodeEvalable); ok { - tree := ev.EvalTree() - if tree == nil { - panic(fmt.Sprintf("%q (%T): nil eval tree", dag.VertexName(v), v)) - } - - // Allow the walker to change our tree if needed. Eval, - // then callback with the output. - log.Printf("[TRACE] vertex %q: evaluating", dag.VertexName(v)) - - tree = walker.EnterEvalTree(v, tree) - output, err := Eval(tree, vertexCtx) - diags = diags.Append(walker.ExitEvalTree(v, output, err)) - if diags.HasErrors() { - return - } - } - - // If the node is dynamically expanded, then expand it - if ev, ok := v.(GraphNodeDynamicExpandable); ok { - log.Printf("[TRACE] vertex %q: expanding dynamic subgraph", dag.VertexName(v)) - - g, err := ev.DynamicExpand(vertexCtx) - if err != nil { - diags = diags.Append(err) - return - } - if g != nil { - // Walk the subgraph - log.Printf("[TRACE] vertex %q: entering dynamic subgraph", dag.VertexName(v)) - subDiags := g.walk(walker) - diags = diags.Append(subDiags) - if subDiags.HasErrors() { - log.Printf("[TRACE] vertex %q: dynamic subgraph encountered errors", dag.VertexName(v)) - return - } - log.Printf("[TRACE] vertex %q: dynamic subgraph completed successfully", dag.VertexName(v)) - } else { - log.Printf("[TRACE] vertex %q: produced no dynamic subgraph", dag.VertexName(v)) - } - } - return - } - - return g.AcyclicGraph.Walk(walkFn) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go deleted file mode 100644 index f631f83b..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go +++ /dev/null @@ -1,77 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/helper/logging" - "github.com/hashicorp/terraform/tfdiags" -) - -// GraphBuilder is an interface that can be implemented and used with -// Terraform to build the graph that Terraform walks. -type GraphBuilder interface { - // Build builds the graph for the given module path. It is up to - // the interface implementation whether this build should expand - // the graph or not. - Build(addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) -} - -// BasicGraphBuilder is a GraphBuilder that builds a graph out of a -// series of transforms and (optionally) validates the graph is a valid -// structure. 
-type BasicGraphBuilder struct { - Steps []GraphTransformer - Validate bool - // Optional name to add to the graph debug log - Name string -} - -func (b *BasicGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - g := &Graph{Path: path} - - var lastStepStr string - for _, step := range b.Steps { - if step == nil { - continue - } - log.Printf("[TRACE] Executing graph transform %T", step) - - stepName := fmt.Sprintf("%T", step) - dot := strings.LastIndex(stepName, ".") - if dot >= 0 { - stepName = stepName[dot+1:] - } - - err := step.Transform(g) - if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { - log.Printf("[TRACE] Completed graph transform %T with new graph:\n%s ------", step, logging.Indent(thisStepStr)) - lastStepStr = thisStepStr - } else { - log.Printf("[TRACE] Completed graph transform %T (no changes)", step) - } - - if err != nil { - if nf, isNF := err.(tfdiags.NonFatalError); isNF { - diags = diags.Append(nf.Diagnostics) - } else { - diags = diags.Append(err) - return g, diags - } - } - } - - // Validate the graph structure - if b.Validate { - if err := g.Validate(); err != nil { - log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String()) - diags = diags.Append(err) - return nil, diags - } - } - - return g, diags -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go deleted file mode 100644 index 5b96d6af..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go +++ /dev/null @@ -1,202 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// ApplyGraphBuilder implements GraphBuilder and is responsible for building -// a graph for applying a Terraform diff. -// -// Because the graph is built from the diff (vs. the config or state), -// this helps ensure that the apply-time graph doesn't modify any resources -// that aren't explicitly in the diff. There are other scenarios where the -// apply can deviate from the diff, so this is just one layer of protection. -type ApplyGraphBuilder struct { - // Config is the configuration tree that the diff was built from. - Config *configs.Config - - // Changes describes the changes that we need to apply. - Changes *plans.Changes - - // State is the current state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas - - // Targets are resources to target. This is only required to make sure - // unnecessary outputs aren't included in the apply graph. The plan - // builder successfully handles targeting resources. In the future, - // outputs should go into the diff so that this is unnecessary. - Targets []addrs.Targetable - - // DisableReduce, if true, will not reduce the graph. Great for testing. - DisableReduce bool - - // Destroy, if true, represents a pure destroy operation - Destroy bool - - // Validate will do structural validation of the graph.
- Validate bool -} - -// See GraphBuilder -func (b *ApplyGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "ApplyGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *ApplyGraphBuilder) Steps() []GraphTransformer { - // Custom factory for creating providers. - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - return &nodeExpandApplyableResource{ - NodeAbstractResource: a, - } - } - - concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodeApplyableResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - steps := []GraphTransformer{ - // Creates all the resources represented in the config. During apply, - // we use this just to ensure that the whole-resource metadata is - // updated to reflect things such as whether the count argument is - // set in config, or which provider configuration manages each resource. - &ConfigTransformer{ - Concrete: concreteResource, - Config: b.Config, - }, - - // Creates all the resource instances represented in the diff, along - // with dependency edges against the whole-resource nodes added by - // ConfigTransformer above. - &DiffTransformer{ - Concrete: concreteResourceInstance, - State: b.State, - Changes: b.Changes, - }, - - // Create orphan output nodes - &OrphanOutputTransformer{Config: b.Config, State: b.State}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Provisioner-related transformations - &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, - &ProvisionerTransformer{}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add module variables - &ModuleVariableTransformer{Config: b.Config}, - - // add providers - TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), - - // Remove modules no longer present in the config - &RemovedModuleTransformer{Config: b.Config, State: b.State}, - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: b.Schemas, Config: b.Config}, - - // Create expansion nodes for all of the module calls. This must - // come after all other transformers that create nodes representing - // objects that can belong to modules. - &ModuleExpansionTransformer{Config: b.Config}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - &AttachDependenciesTransformer{}, - - // Detect when create_before_destroy must be forced on for a particular - // node due to dependency edges, to avoid graph cycles during apply. - &ForcedCBDTransformer{}, - - // Destruction ordering - &DestroyEdgeTransformer{ - Config: b.Config, - State: b.State, - Schemas: b.Schemas, - }, - - &CBDEdgeTransformer{ - Config: b.Config, - State: b.State, - Schemas: b.Schemas, - }, - - // Create a destroy node for root outputs to remove them from the - // state. This does nothing unless invoked via the destroy command - // directly. 
A destroy is identical to a normal apply, except for the - // fact that we also have configuration to evaluate. While the rest of - // the unused nodes can be programmatically pruned (via - // pruneUnusedNodesTransformer), root module outputs only have an - // implied dependency on remote state. This means that if they exist in - // the configuration, the only signal to remove them is via the destroy - // command itself. - &destroyRootOutputTransformer{Destroy: b.Destroy}, - - // We need to remove configuration nodes that are not used at all, as - // they may not be able to evaluate, especially during destroy. - // These include variables, locals, and instance expanders. - &pruneUnusedNodesTransformer{}, - - // Add the node to fix the state count boundaries - &CountBoundaryTransformer{ - Config: b.Config, - }, - - // Target - &TargetsTransformer{Targets: b.Targets}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - &CloseProvisionerTransformer{}, - - // close the root module - &CloseRootModuleTransformer{}, - } - - if !b.DisableReduce { - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). - steps = append(steps, &TransitiveReductionTransformer{}) - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go deleted file mode 100644 index 94cc90d6..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go +++ /dev/null @@ -1,100 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for -// planning a pure-destroy. -// -// Planning a pure destroy operation is simple because we can ignore most -// ordering configuration and simply reverse the state. -type DestroyPlanGraphBuilder struct { - // Config is the configuration tree to build the plan from. - Config *configs.Config - - // State is the current state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas - - // Targets are resources to target - Targets []addrs.Targetable - - // Validate will do structural validation of the graph. 
- Validate bool -} - -// See GraphBuilder -func (b *DestroyPlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "DestroyPlanGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer { - concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodePlanDestroyableResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { - return &NodePlanDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: a, - DeposedKey: key, - } - } - - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - steps := []GraphTransformer{ - // Creates nodes for the resource instances tracked in the state. - &StateTransformer{ - ConcreteCurrent: concreteResourceInstance, - ConcreteDeposed: concreteResourceInstanceDeposed, - State: b.State, - }, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), - - // Destruction ordering. We require this only so that - // targeting below will prune the correct things. - &DestroyEdgeTransformer{ - Config: b.Config, - State: b.State, - Schemas: b.Schemas, - }, - - // Target. Note we don't set "Destroy: true" here since we already - // created proper destroy ordering. - &TargetsTransformer{Targets: b.Targets}, - - // Close the root module - &CloseRootModuleTransformer{}, - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go deleted file mode 100644 index 31aaf0cf..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go +++ /dev/null @@ -1,115 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalGraphBuilder implements GraphBuilder and constructs a graph suitable -// for evaluating in-memory values (input variables, local values, output -// values) in the state without any other side-effects. -// -// This graph is used only in weird cases, such as the "terraform console" -// CLI command, where we need to evaluate expressions against the state -// without taking any other actions. -// -// The generated graph will include nodes for providers, resources, etc -// just to allow indirect dependencies to be resolved, but these nodes will -// not take any actions themselves since we assume that their parts of the -// state, if any, are already complete. -// -// Although the providers are never configured, they must still be available -// in order to obtain schema information used for type checking, etc. -type EvalGraphBuilder struct { - // Config is the configuration tree. - Config *configs.Config - - // State is the current state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. 
- Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas -} - -// See GraphBuilder -func (b *EvalGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: true, - Name: "EvalGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *EvalGraphBuilder) Steps() []GraphTransformer { - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeEvalableProvider{ - NodeAbstractProvider: a, - } - } - - steps := []GraphTransformer{ - // Creates all the data resources that aren't in the state. This will also - // add any orphans from scaling in as destroy nodes. - &ConfigTransformer{ - Concrete: nil, // just use the abstract type - Config: b.Config, - Unique: true, - }, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add module variables - &ModuleVariableTransformer{Config: b.Config}, - - TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: b.Schemas, Config: b.Config}, - - // Create expansion nodes for all of the module calls. This must - // come after all other transformers that create nodes representing - // objects that can belong to modules. - &ModuleExpansionTransformer{ - Config: b.Config, - }, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - - // Although we don't configure providers, we do still start them up - // to get their schemas, and so we must shut them down again here. - &CloseProviderTransformer{}, - - // Close root module - &CloseRootModuleTransformer{}, - - // Remove redundant edges to simplify the graph. - &TransitiveReductionTransformer{}, - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go deleted file mode 100644 index 9acb9899..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go +++ /dev/null @@ -1,100 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/tfdiags" -) - -// ImportGraphBuilder implements GraphBuilder and is responsible for building -// a graph for importing resources into Terraform. This is a much, much -// simpler graph than a normal configuration graph. -type ImportGraphBuilder struct { - // ImportTargets are the list of resources to import. - ImportTargets []*ImportTarget - - // Module is a configuration to build the graph from. See ImportOpts.Config. - Config *configs.Config - - // Components is the factory for our available plugin components. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. 
- Schemas *Schemas -} - -// Build builds the graph according to the steps returned by Steps. -func (b *ImportGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: true, - Name: "ImportGraphBuilder", - }).Build(path) -} - -// Steps returns the ordered list of GraphTransformers that must be executed -// to build a complete graph. -func (b *ImportGraphBuilder) Steps() []GraphTransformer { - // Get the module. If we don't have one, we just use an empty tree - // so that the transform still works but does nothing. - config := b.Config - if config == nil { - config = configs.NewEmptyConfig() - } - - // Custom factory for creating providers. - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - steps := []GraphTransformer{ - // Create all our resources from the configuration and state - &ConfigTransformer{Config: config}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Add the import steps - &ImportStateTransformer{Targets: b.ImportTargets, Config: b.Config}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - TransformProviders(b.Components.ResourceProviders(), concreteProvider, config), - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add module variables - &ModuleVariableTransformer{Config: b.Config}, - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: b.Schemas, Config: b.Config}, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - - // This validates that the providers only depend on variables - &ImportProviderValidateTransformer{}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - - // Close root module - &CloseRootModuleTransformer{}, - - // Optimize - &TransitiveReductionTransformer{}, - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go deleted file mode 100644 index 9c3ffed8..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go +++ /dev/null @@ -1,217 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// PlanGraphBuilder implements GraphBuilder and is responsible for building -// a graph for planning (creating a Terraform Diff). -// -// The primary difference between this graph and others: -// -// * Based on the config since it represents the target state -// -// * Ignores lifecycle options since no lifecycle events occur here. This -// simplifies the graph significantly since complex transforms such as -// create-before-destroy can be completely ignored. -// -type PlanGraphBuilder struct { - // Config is the configuration tree to build a plan from. - Config *configs.Config - - // State is the current state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. 
- Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas - - // Targets are resources to target - Targets []addrs.Targetable - - // DisableReduce, if true, will not reduce the graph. Great for testing. - DisableReduce bool - - // Validate will do structural validation of the graph. - Validate bool - - // CustomConcrete can be set to customize the node types created - // for various parts of the plan. This is useful in order to customize - // the plan behavior. - CustomConcrete bool - ConcreteProvider ConcreteProviderNodeFunc - ConcreteResource ConcreteResourceNodeFunc - ConcreteResourceOrphan ConcreteResourceInstanceNodeFunc - ConcreteModule ConcreteModuleNodeFunc - - once sync.Once -} - -// See GraphBuilder -func (b *PlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "PlanGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *PlanGraphBuilder) Steps() []GraphTransformer { - b.once.Do(b.init) - - concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { - return &NodePlanDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: a, - DeposedKey: key, - } - } - - steps := []GraphTransformer{ - // Creates all the resources represented in the config - &ConfigTransformer{ - Concrete: b.ConcreteResource, - Config: b.Config, - }, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add orphan resources - &OrphanResourceInstanceTransformer{ - Concrete: b.ConcreteResourceOrphan, - State: b.State, - Config: b.Config, - }, - - // We also need nodes for any deposed instance objects present in the - // state, so we can plan to destroy them. (This intentionally - // skips creating nodes for _current_ objects, since ConfigTransformer - // created nodes that will do that during DynamicExpand.) - &StateTransformer{ - ConcreteDeposed: concreteResourceInstanceDeposed, - State: b.State, - }, - - // Create orphan output nodes - &OrphanOutputTransformer{ - Config: b.Config, - State: b.State, - }, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, - &ProvisionerTransformer{}, - - // Add module variables - &ModuleVariableTransformer{ - Config: b.Config, - }, - - TransformProviders(b.Components.ResourceProviders(), b.ConcreteProvider, b.Config), - - // Remove modules no longer present in the config - &RemovedModuleTransformer{Config: b.Config, State: b.State}, - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: b.Schemas, Config: b.Config}, - - // Create expansion nodes for all of the module calls. This must - // come after all other transformers that create nodes representing - // objects that can belong to modules. - &ModuleExpansionTransformer{ - Concrete: b.ConcreteModule, - Config: b.Config, - }, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. 
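The Concrete* fields above are the extension point that lets several builders share one set of transformers: a transformer creates abstract nodes, then asks a caller-supplied factory callback to wrap each one in a plan-, apply-, or validate-specific type. A sketch of that callback pattern, using hypothetical stand-in types rather than the real node types:

package main

import "fmt"

// Vertex stands in for dag.Vertex (any graph node).
type Vertex interface{ Name() string }

// abstractProvider is a simplified NodeAbstractProvider.
type abstractProvider struct{ addr string }

// planProvider wraps the abstract node with plan-time behavior.
type planProvider struct{ *abstractProvider }

func (p *planProvider) Name() string { return p.addr + " (plan)" }

// concreteFunc mirrors ConcreteProviderNodeFunc: a transformer calls
// it to turn an abstract node into whatever flavor the builder wants.
type concreteFunc func(a *abstractProvider) Vertex

func expandProviders(addrs []string, concrete concreteFunc) []Vertex {
	var out []Vertex
	for _, addr := range addrs {
		out = append(out, concrete(&abstractProvider{addr: addr}))
	}
	return out
}

func main() {
	nodes := expandProviders([]string{"provider.aws"}, func(a *abstractProvider) Vertex {
		return &planProvider{abstractProvider: a}
	})
	for _, n := range nodes {
		fmt.Println(n.Name())
	}
}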
- &ReferenceTransformer{}, - - // Make sure data sources are aware of any depends_on from the - // configuration - &attachDataResourceDependenciesTransformer{}, - - // Add the node to fix the state count boundaries - &CountBoundaryTransformer{ - Config: b.Config, - }, - - // Target - &TargetsTransformer{ - Targets: b.Targets, - - // Resource nodes from config have not yet been expanded for - // "count", so we must apply targeting without indices. Exact - // targeting will be dealt with later when these resources - // DynamicExpand. - IgnoreIndices: true, - }, - - // Detect when create_before_destroy must be forced on for a particular - // node due to dependency edges, to avoid graph cycles during apply. - &ForcedCBDTransformer{}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - &CloseProvisionerTransformer{}, - - // Close the root module - &CloseRootModuleTransformer{}, - } - - if !b.DisableReduce { - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). - steps = append(steps, &TransitiveReductionTransformer{}) - } - - return steps -} - -func (b *PlanGraphBuilder) init() { - // Do nothing if the user requests customizing the fields - if b.CustomConcrete { - return - } - - b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { - return &nodeExpandPlannableResource{ - NodeAbstractResource: a, - } - } - - b.ConcreteResourceOrphan = func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: a, - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go deleted file mode 100644 index e34d0564..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go +++ /dev/null @@ -1,200 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" -) - -// RefreshGraphBuilder implements GraphBuilder and is responsible for building -// a graph for refreshing (updating the Terraform state). -// -// The primary difference between this graph and others: -// -// * Based on the state since it represents the only resources that -// need to be refreshed. -// -// * Ignores lifecycle options since no lifecycle events occur here. This -// simplifies the graph significantly since complex transforms such as -// create-before-destroy can be completely ignored. -// -type RefreshGraphBuilder struct { - // Config is the configuration tree. - Config *configs.Config - - // State is the prior state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas - - // Targets are resources to target - Targets []addrs.Targetable - - // DisableReduce, if true, will not reduce the graph. Great for testing. - DisableReduce bool - - // Validate will do structural validation of the graph. 
- Validate bool -} - -// See GraphBuilder -func (b *RefreshGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "RefreshGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *RefreshGraphBuilder) Steps() []GraphTransformer { - // Custom factory for creating providers. - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex { - return &nodeExpandRefreshableManagedResource{ - NodeAbstractResource: a, - } - } - - concreteManagedResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodeRefreshableManagedResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { - // The "Plan" node type also handles refreshing behavior. - return &NodePlanDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: a, - DeposedKey: key, - } - } - - concreteDataResource := func(a *NodeAbstractResource) dag.Vertex { - return &nodeExpandRefreshableDataResource{ - NodeAbstractResource: a, - } - } - - steps := []GraphTransformer{ - // Creates all the managed resources that aren't in the state, but only if - // we have a state already. No resources in state means there's not - // anything to refresh. - func() GraphTransformer { - if b.State.HasResources() { - return &ConfigTransformer{ - Concrete: concreteManagedResource, - Config: b.Config, - Unique: true, - ModeFilter: true, - Mode: addrs.ManagedResourceMode, - } - } - log.Println("[TRACE] No managed resources in state during refresh; skipping managed resource transformer") - return nil - }(), - - // Creates all the data resources that aren't in the state. This will also - // add any orphans from scaling in as destroy nodes. - &ConfigTransformer{ - Concrete: concreteDataResource, - Config: b.Config, - Unique: true, - ModeFilter: true, - Mode: addrs.DataResourceMode, - }, - - // Add any fully-orphaned resources from config (ones that have been - // removed completely, not ones that are just orphaned due to a scaled-in - // count. - &OrphanResourceInstanceTransformer{ - Concrete: concreteManagedResourceInstance, - State: b.State, - Config: b.Config, - }, - - // We also need nodes for any deposed instance objects present in the - // state, so we can check if they still exist. (This intentionally - // skips creating nodes for _current_ objects, since ConfigTransformer - // created nodes that will do that during DynamicExpand.) - &StateTransformer{ - ConcreteDeposed: concreteResourceInstanceDeposed, - State: b.State, - }, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add module variables - &ModuleVariableTransformer{Config: b.Config}, - - TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. 
- &AttachSchemaTransformer{Schemas: b.Schemas, Config: b.Config}, - - // Create expansion nodes for all of the module calls. This must - // come after all other transformers that create nodes representing - // objects that can belong to modules. - &ModuleExpansionTransformer{Config: b.Config}, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - &AttachDependenciesTransformer{}, - - // Target - &TargetsTransformer{ - Targets: b.Targets, - - // Resource nodes from config have not yet been expanded for - // "count", so we must apply targeting without indices. Exact - // targeting will be dealt with later when these resources - // DynamicExpand. - IgnoreIndices: true, - }, - - // Close opened plugin connections - &CloseProviderTransformer{}, - - // Close root module - &CloseRootModuleTransformer{}, - } - - if !b.DisableReduce { - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). - steps = append(steps, &TransitiveReductionTransformer{}) - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go deleted file mode 100644 index 57d4c7b7..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go +++ /dev/null @@ -1,40 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/dag" -) - -// ValidateGraphBuilder creates the graph for the validate operation. -// -// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that -// we only have to validate what we'd normally plan anyways. The -// PlanGraphBuilder given will be modified so it shouldn't be used for anything -// else after calling this function. -func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder { - // We're going to customize the concrete functions - p.CustomConcrete = true - - // Set the provider to the normal provider. This will ask for input. - p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { - return &NodeValidatableResource{ - NodeAbstractResource: a, - } - } - - p.ConcreteModule = func(n *nodeExpandModule) dag.Vertex { - return &nodeValidateModule{ - nodeExpandModule: *n, - } - } - - // We purposely don't set any other concrete types since they don't - // require validation. - - return p -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go deleted file mode 100644 index 73e3821f..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -import "github.com/hashicorp/terraform/dag" - -// GraphDot returns the dot formatting of a visual representation of -// the given Terraform graph. 
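RefreshGraphBuilder's first step above uses an immediately-invoked closure so a step can be included conditionally; when there is nothing to refresh the closure contributes nil, and nil steps are skipped when the list is executed. The idiom in isolation (toy types; the skip-nil loop is an assumption about how the step list is consumed):

package main

import "fmt"

type GraphTransformer interface{ Transform() }

type noopTransformer struct{ label string }

func (t *noopTransformer) Transform() { fmt.Println("ran:", t.label) }

func main() {
	hasState := false // pretend this is b.State.HasResources()

	steps := []GraphTransformer{
		// Immediately-invoked closure: contribute a step only when
		// there is state to refresh, otherwise contribute nil.
		func() GraphTransformer {
			if hasState {
				return &noopTransformer{label: "managed resources"}
			}
			return nil
		}(),
		&noopTransformer{label: "data resources"},
	}

	for _, s := range steps {
		if s == nil {
			continue // nil steps are skipped when the list runs
		}
		s.Transform()
	}
}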
-func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) { - return string(g.Dot(opts)), nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go deleted file mode 100644 index 9ff6e763..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go +++ /dev/null @@ -1,17 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" -) - -// GraphNodeModuleInstance says that a node is part of a graph with a -// different path, and the context should be adjusted accordingly. -type GraphNodeModuleInstance interface { - Path() addrs.ModuleInstance -} - -// GraphNodeModulePath is implemented by all referenceable nodes, to indicate -// their configuration path in unexpanded modules. -type GraphNodeModulePath interface { - ModulePath() addrs.Module -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go deleted file mode 100644 index 706b7e0a..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go +++ /dev/null @@ -1,34 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/tfdiags" -) - -// GraphWalker is an interface that can be implemented that when used -// with Graph.Walk will invoke the given callbacks under certain events. -type GraphWalker interface { - EvalContext() EvalContext - EnterPath(addrs.ModuleInstance) EvalContext - ExitPath(addrs.ModuleInstance) - EnterVertex(dag.Vertex) - ExitVertex(dag.Vertex, tfdiags.Diagnostics) - EnterEvalTree(dag.Vertex, EvalNode) EvalNode - ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics -} - -// NullGraphWalker is a GraphWalker implementation that does nothing. -// This can be embedded within other GraphWalker implementations for easily -// implementing all the required functions. 
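As the comment above suggests, NullGraphWalker exists so concrete walkers can embed it and override only the callbacks they care about. A minimal sketch of that embedding trick, with a two-method Walker standing in for the much larger GraphWalker interface:

package main

import "fmt"

// Walker mirrors the GraphWalker idea: many callbacks, most unused.
type Walker interface {
	EnterVertex(name string)
	ExitVertex(name string, err error)
}

// NullWalker implements everything with no-ops, so concrete walkers
// can embed it and override only what they need.
type NullWalker struct{}

func (NullWalker) EnterVertex(string)       {}
func (NullWalker) ExitVertex(string, error) {}

// loggingWalker overrides just ExitVertex.
type loggingWalker struct{ NullWalker }

func (loggingWalker) ExitVertex(name string, err error) {
	fmt.Printf("done %s (err=%v)\n", name, err)
}

func walk(w Walker, names []string) {
	for _, n := range names {
		w.EnterVertex(n)
		w.ExitVertex(n, nil)
	}
}

func main() {
	walk(loggingWalker{}, []string{"a", "b"})
}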
-type NullGraphWalker struct{} - -func (NullGraphWalker) EvalContext() EvalContext { return new(MockEvalContext) } -func (NullGraphWalker) EnterPath(addrs.ModuleInstance) EvalContext { return new(MockEvalContext) } -func (NullGraphWalker) ExitPath(addrs.ModuleInstance) {} -func (NullGraphWalker) EnterVertex(dag.Vertex) {} -func (NullGraphWalker) ExitVertex(dag.Vertex, tfdiags.Diagnostics) {} -func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n } -func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics { - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go deleted file mode 100644 index 5025c98b..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go +++ /dev/null @@ -1,164 +0,0 @@ -package terraform - -import ( - "context" - "log" - "sync" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/instances" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// ContextGraphWalker is the GraphWalker implementation used with the -// Context struct to walk and evaluate the graph. -type ContextGraphWalker struct { - NullGraphWalker - - // Configurable values - Context *Context - State *states.SyncState // Used for safe concurrent access to state - Changes *plans.ChangesSync // Used for safe concurrent writes to changes - InstanceExpander *instances.Expander // Tracks our gradual expansion of module and resource instances - Operation walkOperation - StopContext context.Context - RootVariableValues InputValues - - // This is an output. Do not set this, nor read it while a graph walk - // is in progress. - NonFatalDiagnostics tfdiags.Diagnostics - - errorLock sync.Mutex - once sync.Once - contexts map[string]*BuiltinEvalContext - contextLock sync.Mutex - variableValues map[string]map[string]cty.Value - variableValuesLock sync.Mutex - providerCache map[string]providers.Interface - providerSchemas map[string]*ProviderSchema - providerLock sync.Mutex - provisionerCache map[string]provisioners.Interface - provisionerSchemas map[string]*configschema.Block - provisionerLock sync.Mutex -} - -func (w *ContextGraphWalker) EnterPath(path addrs.ModuleInstance) EvalContext { - w.contextLock.Lock() - defer w.contextLock.Unlock() - - // If we already have a context for this path cached, use that - key := path.String() - if ctx, ok := w.contexts[key]; ok { - return ctx - } - - ctx := w.EvalContext().WithPath(path) - w.contexts[key] = ctx.(*BuiltinEvalContext) - return ctx -} - -func (w *ContextGraphWalker) EvalContext() EvalContext { - w.once.Do(w.init) - - // Our evaluator shares some locks with the main context and the walker - // so that we can safely run multiple evaluations at once across - // different modules. 
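EnterPath above shows a small but important pattern: evaluation contexts are created lazily, one per module path, behind a mutex-guarded map, so concurrent vertices entering the same path share a context. Reduced to its essentials (evalContext is a hypothetical stand-in for BuiltinEvalContext):

package main

import (
	"fmt"
	"sync"
)

type evalContext struct{ path string }

// contextCache mirrors ContextGraphWalker.EnterPath: one context per
// module path, created on first use under a lock.
type contextCache struct {
	mu   sync.Mutex
	ctxs map[string]*evalContext
}

func (c *contextCache) enterPath(path string) *evalContext {
	c.mu.Lock()
	defer c.mu.Unlock()
	if ctx, ok := c.ctxs[path]; ok {
		return ctx // reuse the context built by an earlier vertex
	}
	ctx := &evalContext{path: path}
	c.ctxs[path] = ctx
	return ctx
}

func main() {
	c := &contextCache{ctxs: make(map[string]*evalContext)}
	a := c.enterPath("module.a")
	b := c.enterPath("module.a")
	fmt.Println(a == b) // true: one context per path
}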
- evaluator := &Evaluator{ - Meta: w.Context.meta, - Config: w.Context.config, - Operation: w.Operation, - State: w.State, - Changes: w.Changes, - Schemas: w.Context.schemas, - VariableValues: w.variableValues, - VariableValuesLock: &w.variableValuesLock, - } - - ctx := &BuiltinEvalContext{ - StopContext: w.StopContext, - Hooks: w.Context.hooks, - InputValue: w.Context.uiInput, - InstanceExpanderValue: w.InstanceExpander, - Components: w.Context.components, - Schemas: w.Context.schemas, - ProviderCache: w.providerCache, - ProviderInputConfig: w.Context.providerInputConfig, - ProviderLock: &w.providerLock, - ProvisionerCache: w.provisionerCache, - ProvisionerLock: &w.provisionerLock, - ChangesValue: w.Changes, - StateValue: w.State, - Evaluator: evaluator, - VariableValues: w.variableValues, - VariableValuesLock: &w.variableValuesLock, - } - - return ctx -} - -func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { - log.Printf("[TRACE] [%s] Entering eval tree: %s", w.Operation, dag.VertexName(v)) - - // Acquire a lock on the semaphore - w.Context.parallelSem.Acquire() - - // We want to filter the evaluation tree to only include operations - // that belong in this operation. - return EvalFilter(n, EvalNodeFilterOp(w.Operation)) -} - -func (w *ContextGraphWalker) ExitEvalTree(v dag.Vertex, output interface{}, err error) tfdiags.Diagnostics { - log.Printf("[TRACE] [%s] Exiting eval tree: %s", w.Operation, dag.VertexName(v)) - - // Release the semaphore - w.Context.parallelSem.Release() - - if err == nil { - return nil - } - - // Acquire the lock because anything is going to require a lock. - w.errorLock.Lock() - defer w.errorLock.Unlock() - - // If the error is non-fatal then we'll accumulate its diagnostics in our - // non-fatal list, rather than returning it directly, so that the graph - // walk can continue. - if nferr, ok := err.(tfdiags.NonFatalError); ok { - log.Printf("[WARN] %s: %s", dag.VertexName(v), nferr) - w.NonFatalDiagnostics = w.NonFatalDiagnostics.Append(nferr.Diagnostics) - return nil - } - - // Otherwise, we'll let our usual diagnostics machinery figure out how to - // unpack this as one or more diagnostic messages and return that. If we - // get down here then the returned diagnostics will contain at least one - // error, causing the graph walk to halt. - var diags tfdiags.Diagnostics - diags = diags.Append(err) - return diags -} - -func (w *ContextGraphWalker) init() { - w.contexts = make(map[string]*BuiltinEvalContext) - w.providerCache = make(map[string]providers.Interface) - w.providerSchemas = make(map[string]*ProviderSchema) - w.provisionerCache = make(map[string]provisioners.Interface) - w.provisionerSchemas = make(map[string]*configschema.Block) - w.variableValues = make(map[string]map[string]cty.Value) - - // Populate root module variable values. Other modules will be populated - // during the graph walk. - w.variableValues[""] = make(map[string]cty.Value) - for k, iv := range w.RootVariableValues { - w.variableValues[""][k] = iv.Value - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go deleted file mode 100644 index 859f6fb1..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go +++ /dev/null @@ -1,18 +0,0 @@ -package terraform - -//go:generate go run golang.org/x/tools/cmd/stringer -type=walkOperation graph_walk_operation.go - -// walkOperation is an enum which tells the walkContext what to do. 
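EnterEvalTree and ExitEvalTree above bracket every vertex evaluation with a semaphore acquire and release, which is how the walk bounds its parallelism. A runnable sketch of a counting semaphore used that way (the channel-based type is an assumption for illustration; the real parallelSem implementation is not shown in this diff):

package main

import (
	"fmt"
	"sync"
)

// semaphore bounds how many vertices evaluate at once, in the spirit
// of parallelSem being acquired on entry and released on exit.
type semaphore chan struct{}

func (s semaphore) Acquire() { s <- struct{}{} }
func (s semaphore) Release() { <-s }

func main() {
	sem := make(semaphore, 2) // at most two concurrent evaluations
	var wg sync.WaitGroup

	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			sem.Acquire()         // as in EnterEvalTree
			defer sem.Release()   // as in ExitEvalTree
			fmt.Println("evaluating vertex", i)
		}(i)
	}
	wg.Wait()
}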
-type walkOperation byte - -const ( - walkInvalid walkOperation = iota - walkApply - walkPlan - walkPlanDestroy - walkRefresh - walkValidate - walkDestroy - walkImport - walkEval // used just to prepare EvalContext for expression evaluation, with no other actions -) diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go deleted file mode 100644 index b51e1a26..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[GraphTypeInvalid-0] - _ = x[GraphTypeLegacy-1] - _ = x[GraphTypeRefresh-2] - _ = x[GraphTypePlan-3] - _ = x[GraphTypePlanDestroy-4] - _ = x[GraphTypeApply-5] - _ = x[GraphTypeValidate-6] - _ = x[GraphTypeEval-7] -} - -const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeValidateGraphTypeEval" - -var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 111, 124} - -func (i GraphType) String() string { - if i >= GraphType(len(_GraphType_index)-1) { - return "GraphType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go deleted file mode 100644 index c0bb23ab..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/hook.go +++ /dev/null @@ -1,161 +0,0 @@ -package terraform - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" -) - -// HookAction is an enum of actions that can be taken as a result of a hook -// callback. This allows you to modify the behavior of Terraform at runtime. -type HookAction byte - -const ( - // HookActionContinue continues with processing as usual. - HookActionContinue HookAction = iota - - // HookActionHalt halts immediately: no more hooks are processed - // and the action that Terraform was about to take is cancelled. - HookActionHalt -) - -// Hook is the interface that must be implemented to hook into various -// parts of Terraform, allowing you to inspect or change behavior at runtime. -// -// There are MANY hook points into Terraform. If you only want to implement -// some hook points, but not all (which is the likely case), then embed the -// NilHook into your struct, which implements all of the interface but does -// nothing. Then, override only the functions you want to implement. -type Hook interface { - // PreApply and PostApply are called before and after an action for a - // single instance is applied. The error argument in PostApply is the - // error, if any, that was returned from the provider Apply call itself. 
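The generated graphtype_string.go above illustrates how stringer encodes an enum: all names are packed into a single string, an index table records each name's start offset, and String slices between adjacent offsets. The same scheme applied to a hypothetical three-value enum:

package main

import (
	"fmt"
	"strconv"
)

type walkOp byte

const (
	walkInvalid walkOp = iota
	walkApply
	walkPlan
)

// All names in one string; the table holds each name's start offset.
const opNames = "walkInvalidwalkApplywalkPlan"

var opIndex = [...]uint8{0, 11, 20, 28}

func (i walkOp) String() string {
	if i >= walkOp(len(opIndex)-1) {
		return "walkOp(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return opNames[opIndex[i]:opIndex[i+1]]
}

func main() {
	fmt.Println(walkPlan)  // walkPlan
	fmt.Println(walkOp(9)) // walkOp(9)
}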
- PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) - PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) - - // PreDiff and PostDiff are called before and after a provider is given - // the opportunity to customize the proposed new state to produce the - // planned new state. - PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) - PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) - - // The provisioning hooks signal both the overall start end end of - // provisioning for a particular instance and of each of the individual - // configured provisioners for each instance. The sequence of these - // for a given instance might look something like this: - // - // PreProvisionInstance(aws_instance.foo[1], ...) - // PreProvisionInstanceStep(aws_instance.foo[1], "file") - // PostProvisionInstanceStep(aws_instance.foo[1], "file", nil) - // PreProvisionInstanceStep(aws_instance.foo[1], "remote-exec") - // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Installing foo...") - // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Configuring bar...") - // PostProvisionInstanceStep(aws_instance.foo[1], "remote-exec", nil) - // PostProvisionInstance(aws_instance.foo[1], ...) - // - // ProvisionOutput is called with output sent back by the provisioners. - // This will be called multiple times as output comes in, with each call - // representing one line of output. It cannot control whether the - // provisioner continues running. - PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) - PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) - PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) - PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) - ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) - - // PreRefresh and PostRefresh are called before and after a single - // resource state is refreshed, respectively. - PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) - PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) - - // PreImportState and PostImportState are called before and after - // (respectively) each state import operation for a given resource address. - PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) - PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) - - // PostStateUpdate is called each time the state is updated. It receives - // a deep copy of the state, which it may therefore access freely without - // any need for locks to protect from concurrent writes from the caller. - PostStateUpdate(new *states.State) (HookAction, error) -} - -// NilHook is a Hook implementation that does nothing. It exists only to -// simplify implementing hooks. You can embed this into your Hook implementation -// and only implement the functions you are interested in. 
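As the NilHook comment says, the intended use is to embed it and override only the callbacks you need. A compact sketch, with Hook trimmed to two methods and string addresses for brevity (the real interface has many more callbacks and richer types):

package main

import "fmt"

type HookAction byte

const (
	HookActionContinue HookAction = iota
	HookActionHalt
)

// Hook is trimmed to two of the many callbacks for this sketch.
type Hook interface {
	PreApply(addr string) (HookAction, error)
	PostApply(addr string, err error) (HookAction, error)
}

// NilHook answers "continue" to everything; embed it and override
// only the callbacks you care about.
type NilHook struct{}

func (NilHook) PreApply(string) (HookAction, error)         { return HookActionContinue, nil }
func (NilHook) PostApply(string, error) (HookAction, error) { return HookActionContinue, nil }

// countingHook tracks how many applies completed.
type countingHook struct {
	NilHook
	applies int
}

func (h *countingHook) PostApply(addr string, err error) (HookAction, error) {
	h.applies++
	fmt.Printf("applied %s (total %d)\n", addr, h.applies)
	return HookActionContinue, nil
}

func main() {
	var h Hook = &countingHook{}
	h.PreApply("aws_instance.foo")
	h.PostApply("aws_instance.foo", nil)
}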
-type NilHook struct{} - -var _ Hook = (*NilHook)(nil) - -func (*NilHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { -} - -func (*NilHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostStateUpdate(new *states.State) (HookAction, error) { - return HookActionContinue, nil -} - -// handleHook turns hook actions into panics. This lets you use the -// panic/recover mechanism in Go as a flow control mechanism for hook -// actions. -func handleHook(a HookAction, err error) { - if err != nil { - // TODO: handle errors - } - - switch a { - case HookActionContinue: - return - case HookActionHalt: - panic(HookActionHalt) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go deleted file mode 100644 index 6efa3196..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go +++ /dev/null @@ -1,274 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" -) - -// MockHook is an implementation of Hook that can be used for tests. -// It records all of its function calls. 
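MockHook, next, follows a common hand-rolled mock style: a mutex plus Called/Args/Return fields per callback, so tests can assert what was invoked and script the responses. The shape in miniature (one callback, with a toy string-based signature):

package main

import (
	"fmt"
	"sync"
)

// mockHook mirrors MockHook's style: embed a mutex, record every
// argument, and return canned values configured by the test.
type mockHook struct {
	sync.Mutex

	PreApplyCalled bool
	PreApplyAddr   string
	PreApplyReturn string
}

func (h *mockHook) PreApply(addr string) string {
	h.Lock()
	defer h.Unlock()
	h.PreApplyCalled = true
	h.PreApplyAddr = addr
	return h.PreApplyReturn
}

func main() {
	h := &mockHook{PreApplyReturn: "continue"}
	fmt.Println(h.PreApply("aws_instance.foo")) // continue
	fmt.Println(h.PreApplyCalled, h.PreApplyAddr)
}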
-type MockHook struct { - sync.Mutex - - PreApplyCalled bool - PreApplyAddr addrs.AbsResourceInstance - PreApplyGen states.Generation - PreApplyAction plans.Action - PreApplyPriorState cty.Value - PreApplyPlannedState cty.Value - PreApplyReturn HookAction - PreApplyError error - - PostApplyCalled bool - PostApplyAddr addrs.AbsResourceInstance - PostApplyGen states.Generation - PostApplyNewState cty.Value - PostApplyError error - PostApplyReturn HookAction - PostApplyReturnError error - PostApplyFn func(addrs.AbsResourceInstance, states.Generation, cty.Value, error) (HookAction, error) - - PreDiffCalled bool - PreDiffAddr addrs.AbsResourceInstance - PreDiffGen states.Generation - PreDiffPriorState cty.Value - PreDiffProposedState cty.Value - PreDiffReturn HookAction - PreDiffError error - - PostDiffCalled bool - PostDiffAddr addrs.AbsResourceInstance - PostDiffGen states.Generation - PostDiffAction plans.Action - PostDiffPriorState cty.Value - PostDiffPlannedState cty.Value - PostDiffReturn HookAction - PostDiffError error - - PreProvisionInstanceCalled bool - PreProvisionInstanceAddr addrs.AbsResourceInstance - PreProvisionInstanceState cty.Value - PreProvisionInstanceReturn HookAction - PreProvisionInstanceError error - - PostProvisionInstanceCalled bool - PostProvisionInstanceAddr addrs.AbsResourceInstance - PostProvisionInstanceState cty.Value - PostProvisionInstanceReturn HookAction - PostProvisionInstanceError error - - PreProvisionInstanceStepCalled bool - PreProvisionInstanceStepAddr addrs.AbsResourceInstance - PreProvisionInstanceStepProvisionerType string - PreProvisionInstanceStepReturn HookAction - PreProvisionInstanceStepError error - - PostProvisionInstanceStepCalled bool - PostProvisionInstanceStepAddr addrs.AbsResourceInstance - PostProvisionInstanceStepProvisionerType string - PostProvisionInstanceStepErrorArg error - PostProvisionInstanceStepReturn HookAction - PostProvisionInstanceStepError error - - ProvisionOutputCalled bool - ProvisionOutputAddr addrs.AbsResourceInstance - ProvisionOutputProvisionerType string - ProvisionOutputMessage string - - PreRefreshCalled bool - PreRefreshAddr addrs.AbsResourceInstance - PreRefreshGen states.Generation - PreRefreshPriorState cty.Value - PreRefreshReturn HookAction - PreRefreshError error - - PostRefreshCalled bool - PostRefreshAddr addrs.AbsResourceInstance - PostRefreshGen states.Generation - PostRefreshPriorState cty.Value - PostRefreshNewState cty.Value - PostRefreshReturn HookAction - PostRefreshError error - - PreImportStateCalled bool - PreImportStateAddr addrs.AbsResourceInstance - PreImportStateID string - PreImportStateReturn HookAction - PreImportStateError error - - PostImportStateCalled bool - PostImportStateAddr addrs.AbsResourceInstance - PostImportStateNewStates []providers.ImportedResource - PostImportStateReturn HookAction - PostImportStateError error - - PostStateUpdateCalled bool - PostStateUpdateState *states.State - PostStateUpdateReturn HookAction - PostStateUpdateError error -} - -var _ Hook = (*MockHook)(nil) - -func (h *MockHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreApplyCalled = true - h.PreApplyAddr = addr - h.PreApplyGen = gen - h.PreApplyAction = action - h.PreApplyPriorState = priorState - h.PreApplyPlannedState = plannedNewState - return h.PreApplyReturn, h.PreApplyError -} - -func (h *MockHook) PostApply(addr addrs.AbsResourceInstance, gen 
states.Generation, newState cty.Value, err error) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostApplyCalled = true - h.PostApplyAddr = addr - h.PostApplyGen = gen - h.PostApplyNewState = newState - h.PostApplyError = err - - if h.PostApplyFn != nil { - return h.PostApplyFn(addr, gen, newState, err) - } - - return h.PostApplyReturn, h.PostApplyReturnError -} - -func (h *MockHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreDiffCalled = true - h.PreDiffAddr = addr - h.PreDiffGen = gen - h.PreDiffPriorState = priorState - h.PreDiffProposedState = proposedNewState - return h.PreDiffReturn, h.PreDiffError -} - -func (h *MockHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostDiffCalled = true - h.PostDiffAddr = addr - h.PostDiffGen = gen - h.PostDiffAction = action - h.PostDiffPriorState = priorState - h.PostDiffPlannedState = plannedNewState - return h.PostDiffReturn, h.PostDiffError -} - -func (h *MockHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreProvisionInstanceCalled = true - h.PreProvisionInstanceAddr = addr - h.PreProvisionInstanceState = state - return h.PreProvisionInstanceReturn, h.PreProvisionInstanceError -} - -func (h *MockHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostProvisionInstanceCalled = true - h.PostProvisionInstanceAddr = addr - h.PostProvisionInstanceState = state - return h.PostProvisionInstanceReturn, h.PostProvisionInstanceError -} - -func (h *MockHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreProvisionInstanceStepCalled = true - h.PreProvisionInstanceStepAddr = addr - h.PreProvisionInstanceStepProvisionerType = typeName - return h.PreProvisionInstanceStepReturn, h.PreProvisionInstanceStepError -} - -func (h *MockHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostProvisionInstanceStepCalled = true - h.PostProvisionInstanceStepAddr = addr - h.PostProvisionInstanceStepProvisionerType = typeName - h.PostProvisionInstanceStepErrorArg = err - return h.PostProvisionInstanceStepReturn, h.PostProvisionInstanceStepError -} - -func (h *MockHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { - h.Lock() - defer h.Unlock() - - h.ProvisionOutputCalled = true - h.ProvisionOutputAddr = addr - h.ProvisionOutputProvisionerType = typeName - h.ProvisionOutputMessage = line -} - -func (h *MockHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreRefreshCalled = true - h.PreRefreshAddr = addr - h.PreRefreshGen = gen - h.PreRefreshPriorState = priorState - return h.PreRefreshReturn, h.PreRefreshError -} - -func (h *MockHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostRefreshCalled = true - h.PostRefreshAddr = addr - h.PostRefreshPriorState = priorState - h.PostRefreshNewState = newState - 
return h.PostRefreshReturn, h.PostRefreshError -} - -func (h *MockHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreImportStateCalled = true - h.PreImportStateAddr = addr - h.PreImportStateID = importID - return h.PreImportStateReturn, h.PreImportStateError -} - -func (h *MockHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostImportStateCalled = true - h.PostImportStateAddr = addr - h.PostImportStateNewStates = imported - return h.PostImportStateReturn, h.PostImportStateError -} - -func (h *MockHook) PostStateUpdate(new *states.State) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostStateUpdateCalled = true - h.PostStateUpdateState = new - return h.PostStateUpdateReturn, h.PostStateUpdateError -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go deleted file mode 100644 index 811fb337..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go +++ /dev/null @@ -1,100 +0,0 @@ -package terraform - -import ( - "sync/atomic" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" -) - -// stopHook is a private Hook implementation that Terraform uses to -// signal when to stop or cancel actions. -type stopHook struct { - stop uint32 -} - -var _ Hook = (*stopHook)(nil) - -func (h *stopHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { -} - -func (h *stopHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostImportState(addr addrs.AbsResourceInstance, imported 
[]providers.ImportedResource) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostStateUpdate(new *states.State) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) hook() (HookAction, error) { - if h.Stopped() { - // FIXME: This should really return an error since stopping partway - // through is not a successful run-to-completion, but we'll need to - // introduce that cautiously since existing automation solutions may - // be depending on this behavior. - return HookActionHalt, nil - } - - return HookActionContinue, nil -} - -// reset should be called within the lock context -func (h *stopHook) Reset() { - atomic.StoreUint32(&h.stop, 0) -} - -func (h *stopHook) Stop() { - atomic.StoreUint32(&h.stop, 1) -} - -func (h *stopHook) Stopped() bool { - return atomic.LoadUint32(&h.stop) == 1 -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/instance_expanders.go b/vendor/github.com/hashicorp/terraform/terraform/instance_expanders.go deleted file mode 100644 index b3733afb..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/instance_expanders.go +++ /dev/null @@ -1,7 +0,0 @@ -package terraform - -// graphNodeExpandsInstances is implemented by nodes that causes instances to -// be registered in the instances.Expander. -type graphNodeExpandsInstances interface { - expandsInstances() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go deleted file mode 100644 index 375a8638..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/instancetype.go +++ /dev/null @@ -1,13 +0,0 @@ -package terraform - -//go:generate go run golang.org/x/tools/cmd/stringer -type=InstanceType instancetype.go - -// InstanceType is an enum of the various types of instances store in the State -type InstanceType int - -const ( - TypeInvalid InstanceType = iota - TypePrimary - TypeTainted - TypeDeposed -) diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go deleted file mode 100644 index 95b7a980..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go +++ /dev/null @@ -1,26 +0,0 @@ -// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
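This func _() guard is stringer's compile-time tripwire: indexing a length-one array with Name-N expressions compiles only while each constant still has the value the generator saw, so renumbering the enum breaks the build until the file is regenerated. A standalone illustration with a hypothetical Color enum:

package main

import "fmt"

type Color int

const (
	Red Color = iota
	Green
	Blue
)

// Breaks with an "invalid array index" compile error if the constants
// are renumbered, signalling that generated code is stale.
func _() {
	var x [1]struct{}
	_ = x[Red-0]
	_ = x[Green-1]
	_ = x[Blue-2]
}

func main() {
	fmt.Println(Red, Green, Blue) // 0 1 2 (no String method here)
}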
- var x [1]struct{} - _ = x[TypeInvalid-0] - _ = x[TypePrimary-1] - _ = x[TypeTainted-2] - _ = x[TypeDeposed-3] -} - -const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" - -var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} - -func (i InstanceType) String() string { - if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { - return "InstanceType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go deleted file mode 100644 index e4952039..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go +++ /dev/null @@ -1,22 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/configs" -) - -// NodeCountBoundary fixes up any transitions between "each modes" in objects -// saved in state, such as switching from NoEach to EachInt. -type NodeCountBoundary struct { - Config *configs.Config -} - -func (n *NodeCountBoundary) Name() string { - return "meta.count-boundary (EachMode fixup)" -} - -// GraphNodeEvalable -func (n *NodeCountBoundary) EvalTree() EvalNode { - return &EvalCountFixZeroOneBoundaryGlobal{ - Config: n.Config, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go deleted file mode 100644 index 6ba39904..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go +++ /dev/null @@ -1,40 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" -) - -// NodeDestroyableDataResourceInstance represents a resource that is "destroyable": -// it is ready to be destroyed. -type NodeDestroyableDataResourceInstance struct { - *NodeAbstractResourceInstance -} - -// GraphNodeEvalable -func (n *NodeDestroyableDataResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - var providerSchema *ProviderSchema - // We don't need the provider, but we're calling EvalGetProvider to load the - // schema. - var provider providers.Interface - - // Just destroy it. 
- var state *states.ResourceInstanceObject - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalWriteState{ - Addr: addr.Resource, - State: &state, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go deleted file mode 100644 index 98eda631..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go +++ /dev/null @@ -1,276 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -type nodeExpandRefreshableDataResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeDynamicExpandable = (*nodeExpandRefreshableDataResource)(nil) - _ GraphNodeReferenceable = (*nodeExpandRefreshableDataResource)(nil) - _ GraphNodeReferencer = (*nodeExpandRefreshableDataResource)(nil) - _ GraphNodeConfigResource = (*nodeExpandRefreshableDataResource)(nil) - _ GraphNodeAttachResourceConfig = (*nodeExpandRefreshableDataResource)(nil) -) - -func (n *nodeExpandRefreshableDataResource) Name() string { - return n.NodeAbstractResource.Name() + " (expand)" -} - -func (n *nodeExpandRefreshableDataResource) References() []*addrs.Reference { - return (&NodeRefreshableManagedResource{NodeAbstractResource: n.NodeAbstractResource}).References() -} - -func (n *nodeExpandRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var g Graph - - expander := ctx.InstanceExpander() - for _, module := range expander.ExpandModule(n.Addr.Module) { - g.Add(&NodeRefreshableDataResource{ - NodeAbstractResource: n.NodeAbstractResource, - Addr: n.Addr.Resource.Absolute(module), - }) - } - - return &g, nil -} - -// NodeRefreshableDataResource represents a resource that is "refreshable". -type NodeRefreshableDataResource struct { - *NodeAbstractResource - - Addr addrs.AbsResource -} - -var ( - _ GraphNodeModuleInstance = (*NodeRefreshableDataResource)(nil) - _ GraphNodeDynamicExpandable = (*NodeRefreshableDataResource)(nil) - _ GraphNodeReferenceable = (*NodeRefreshableDataResource)(nil) - _ GraphNodeReferencer = (*NodeRefreshableDataResource)(nil) - _ GraphNodeConfigResource = (*NodeRefreshableDataResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeRefreshableDataResource)(nil) - _ GraphNodeAttachProviderMetaConfigs = (*NodeAbstractResource)(nil) -) - -func (n *NodeRefreshableDataResource) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeDynamicExpandable -func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - expander := ctx.InstanceExpander() - - switch { - case n.Config.Count != nil: - count, countDiags := evaluateCountExpressionValue(n.Config.Count, ctx) - diags = diags.Append(countDiags) - if countDiags.HasErrors() { - return nil, diags.Err() - } - if !count.IsKnown() { - // If the count isn't known yet, we'll skip refreshing and try expansion - // again during the plan walk. 
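The IsKnown checks in DynamicExpand lean on cty's unknown values: a count or for_each that depends on something not yet applied evaluates to an unknown, and expansion is deferred to the plan walk rather than guessed at. A small illustration with the real go-cty package (assuming it is available as a dependency):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	known := cty.NumberIntVal(3)
	unknown := cty.UnknownVal(cty.Number) // e.g. count referencing an unapplied value

	fmt.Println(known.IsKnown())   // true
	fmt.Println(unknown.IsKnown()) // false

	// Only a known count can be converted and used for expansion.
	if known.IsKnown() {
		n, _ := known.AsBigFloat().Int64()
		fmt.Println("expanding to", n, "instances")
	}
}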
- return nil, nil - } - - c, _ := count.AsBigFloat().Int64() - expander.SetResourceCount(n.Addr.Module, n.Addr.Resource, int(c)) - - case n.Config.ForEach != nil: - forEachVal, forEachDiags := evaluateForEachExpressionValue(n.Config.ForEach, ctx) - diags = diags.Append(forEachDiags) - if forEachDiags.HasErrors() { - return nil, diags.Err() - } - if !forEachVal.IsKnown() { - // If the for_each isn't known yet, we'll skip refreshing and try expansion - // again during the plan walk. - return nil, nil - } - - expander.SetResourceForEach(n.Addr.Module, n.Addr.Resource, forEachVal.AsValueMap()) - - default: - expander.SetResourceSingle(n.Addr.Module, n.Addr.Resource) - } - - // Next we need to potentially rename an instance address in the state - // if we're transitioning whether "count" is set at all. - fixResourceCountSetTransition(ctx, n.ResourceAddr(), n.Config.Count != nil) - - instanceAddrs := expander.ExpandResource(n.Addr) - - // Our graph transformers require access to the full state, so we'll - // temporarily lock it while we work on this. - state := ctx.State().Lock() - defer ctx.State().Unlock() - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.ProviderMetas = n.ProviderMetas - - return &NodeRefreshableDataResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - // We also need a destroyable resource for orphans that are a result of a - // scaled-in count. - concreteResourceDestroyable := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and provider since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - - return &NodeDestroyableDataResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count. - &ResourceCountTransformer{ - Concrete: concreteResource, - Schema: n.Schema, - Addr: n.ResourceAddr(), - InstanceAddrs: instanceAddrs, - }, - - // Add the count orphans. As these are orphaned refresh nodes, we add them - // directly as NodeDestroyableDataResource. - &OrphanResourceInstanceCountTransformer{ - Concrete: concreteResourceDestroyable, - Addr: n.Addr, - InstanceAddrs: instanceAddrs, - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{Targets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodeRefreshableDataResource", - } - - graph, diags := b.Build(nil) - return graph, diags.ErrWithWarnings() -} - -// NodeRefreshableDataResourceInstance represents a single resource instance -// that is refreshable. -type NodeRefreshableDataResourceInstance struct { - *NodeAbstractResourceInstance -} - -// GraphNodeEvalable -func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // These variables are the state for the eval sequence below, and are - // updated through pointers. 
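The var declarations that follow are the idiom called out in the comment above: nodes in an EvalSequence communicate by writing through shared pointers that later nodes read. Stripped of the Terraform types, the mechanism is just this (EvalNode reduced to a plain function for the sketch):

package main

import "fmt"

// EvalNode reduced to a function; the real EvalSequence runs typed
// nodes in order, each reading and writing shared state.
type EvalNode func() error

func evalSequence(nodes []EvalNode) error {
	for _, n := range nodes {
		if err := n(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	var provider string // filled by the first node, read by the second

	err := evalSequence([]EvalNode{
		func() error { provider = "aws"; return nil },
		func() error { fmt.Println("refreshing via", provider); return nil },
	})
	fmt.Println(err)
}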
- var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - Output: &state, - }, - - // EvalReadDataRefresh will _attempt_ to read the data source, but - // may generate an incomplete planned object if the configuration - // includes values that won't be known until apply. - &evalReadDataRefresh{ - evalReadData{ - Addr: addr.Resource, - Config: n.Config, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - OutputChange: &change, - State: &state, - }, - }, - - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return change == nil, nil - - }, - Then: &EvalSequence{ - Nodes: []EvalNode{ - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - ProviderSchema: &providerSchema, - }, - &EvalUpdateStateHook{}, - }, - }, - Else: &EvalSequence{ - // We can't deal with this yet, so we'll repeat this step - // during the plan walk to produce a planned change to read - // this during the apply walk. However, we do still need to - // save the generated change and partial state so that - // results from it can be included in other data resources - // or provider configurations during the refresh walk. - // (The planned object we save in the state here will be - // pruned out at the end of the refresh walk, returning - // it back to being unset again for subsequent walks.) - Nodes: []EvalNode{ - &EvalWriteDiff{ - Addr: addr.Resource, - Change: &change, - ProviderSchema: &providerSchema, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - ProviderSchema: &providerSchema, - }, - }, - }, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_local.go b/vendor/github.com/hashicorp/terraform/terraform/node_local.go deleted file mode 100644 index 8c008ddd..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_local.go +++ /dev/null @@ -1,152 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/lang" -) - -// nodeExpandLocal represents a named local value in a configuration module, -// which has not yet been expanded. -type nodeExpandLocal struct { - Addr addrs.LocalValue - Module addrs.Module - Config *configs.Local -} - -var ( - _ RemovableIfNotTargeted = (*nodeExpandLocal)(nil) - _ GraphNodeReferenceable = (*nodeExpandLocal)(nil) - _ GraphNodeReferencer = (*nodeExpandLocal)(nil) - _ GraphNodeDynamicExpandable = (*nodeExpandLocal)(nil) - _ graphNodeTemporaryValue = (*nodeExpandLocal)(nil) - _ graphNodeExpandsInstances = (*nodeExpandLocal)(nil) -) - -func (n *nodeExpandLocal) expandsInstances() {} - -// graphNodeTemporaryValue -func (n *nodeExpandLocal) temporaryValue() bool { - return true -} - -func (n *nodeExpandLocal) Name() string { - path := n.Module.String() - addr := n.Addr.String() + " (expand)" - - if path != "" { - return path + "." 
+ addr - } - return addr -} - -// GraphNodeModulePath -func (n *nodeExpandLocal) ModulePath() addrs.Module { - return n.Module -} - -// RemovableIfNotTargeted -func (n *nodeExpandLocal) RemoveIfNotTargeted() bool { - return true -} - -// GraphNodeReferenceable -func (n *nodeExpandLocal) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr} -} - -// GraphNodeReferencer -func (n *nodeExpandLocal) References() []*addrs.Reference { - refs, _ := lang.ReferencesInExpr(n.Config.Expr) - return appendResourceDestroyReferences(refs) -} - -func (n *nodeExpandLocal) DynamicExpand(ctx EvalContext) (*Graph, error) { - var g Graph - expander := ctx.InstanceExpander() - for _, module := range expander.ExpandModule(n.Module) { - o := &NodeLocal{ - Addr: n.Addr.Absolute(module), - Config: n.Config, - } - log.Printf("[TRACE] Expanding local: adding %s as %T", o.Addr.String(), o) - g.Add(o) - } - return &g, nil -} - -// NodeLocal represents a named local value in a particular module. -// -// Local value nodes only have one operation, common to all walk types: -// evaluate the result and place it in state. -type NodeLocal struct { - Addr addrs.AbsLocalValue - Config *configs.Local -} - -var ( - _ GraphNodeModuleInstance = (*NodeLocal)(nil) - _ RemovableIfNotTargeted = (*NodeLocal)(nil) - _ GraphNodeReferenceable = (*NodeLocal)(nil) - _ GraphNodeReferencer = (*NodeLocal)(nil) - _ GraphNodeEvalable = (*NodeLocal)(nil) - _ graphNodeTemporaryValue = (*NodeLocal)(nil) - _ dag.GraphNodeDotter = (*NodeLocal)(nil) -) - -// graphNodeTemporaryValue -func (n *NodeLocal) temporaryValue() bool { - return true -} - -func (n *NodeLocal) Name() string { - return n.Addr.String() -} - -// GraphNodeModuleInstance -func (n *NodeLocal) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeModulePath -func (n *NodeLocal) ModulePath() addrs.Module { - return n.Addr.Module.Module() -} - -// RemovableIfNotTargeted -func (n *NodeLocal) RemoveIfNotTargeted() bool { - return true -} - -// GraphNodeReferenceable -func (n *NodeLocal) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr.LocalValue} -} - -// GraphNodeReferencer -func (n *NodeLocal) References() []*addrs.Reference { - refs, _ := lang.ReferencesInExpr(n.Config.Expr) - return appendResourceDestroyReferences(refs) -} - -// GraphNodeEvalable -func (n *NodeLocal) EvalTree() EvalNode { - return &EvalLocal{ - Addr: n.Addr.LocalValue, - Expr: n.Config.Expr, - } -} - -// dag.GraphNodeDotter impl. -func (n *NodeLocal) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_expand.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_expand.go deleted file mode 100644 index cfc519a3..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_module_expand.go +++ /dev/null @@ -1,289 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/lang" -) - -type ConcreteModuleNodeFunc func(n *nodeExpandModule) dag.Vertex - -// nodeExpandModule represents a module call in the configuration that -// might expand into multiple module instances depending on how it is -// configured. 
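The deleted nodeExpandLocal above implements the expansion pattern used throughout this package: DynamicExpand asks the instance expander for every instance of the containing module and emits one concrete NodeLocal per instance into a subgraph. Below is a minimal, self-contained sketch of that pattern; expander, moduleInstance, and concreteNode are simplified placeholders, not the real terraform types.

package main

import "fmt"

// moduleInstance is a hypothetical stand-in for addrs.ModuleInstance.
type moduleInstance string

// expander is a hypothetical stand-in for the instance expander: it maps a
// module path to the concrete instances it expanded into.
type expander struct {
	instances map[string][]moduleInstance
}

func (e *expander) ExpandModule(module string) []moduleInstance {
	return e.instances[module]
}

// concreteNode stands in for NodeLocal: one graph vertex per expanded instance.
type concreteNode struct {
	addr string
}

// dynamicExpand mirrors the shape of nodeExpandLocal.DynamicExpand: ask the
// expander for every instance of the containing module, then emit one
// concrete node per instance.
func dynamicExpand(e *expander, module, local string) []concreteNode {
	var nodes []concreteNode
	for _, inst := range e.ExpandModule(module) {
		nodes = append(nodes, concreteNode{addr: fmt.Sprintf("%s.local.%s", inst, local)})
	}
	return nodes
}

func main() {
	e := &expander{instances: map[string][]moduleInstance{
		"module.foo": {"module.foo[0]", "module.foo[1]"},
	}}
	for _, n := range dynamicExpand(e, "module.foo", "greeting") {
		fmt.Println(n.addr) // module.foo[0].local.greeting, module.foo[1].local.greeting
	}
}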
-type nodeExpandModule struct { - Addr addrs.Module - Config *configs.Module - ModuleCall *configs.ModuleCall -} - -var ( - _ RemovableIfNotTargeted = (*nodeExpandModule)(nil) - _ GraphNodeEvalable = (*nodeExpandModule)(nil) - _ GraphNodeReferencer = (*nodeExpandModule)(nil) - _ GraphNodeReferenceOutside = (*nodeExpandModule)(nil) - _ graphNodeExpandsInstances = (*nodeExpandModule)(nil) -) - -func (n *nodeExpandModule) expandsInstances() {} - -func (n *nodeExpandModule) Name() string { - return n.Addr.String() + " (expand)" -} - -// GraphNodeModulePath implementation -func (n *nodeExpandModule) ModulePath() addrs.Module { - return n.Addr -} - -// GraphNodeReferencer implementation -func (n *nodeExpandModule) References() []*addrs.Reference { - var refs []*addrs.Reference - - if n.ModuleCall == nil { - return nil - } - - for _, traversal := range n.ModuleCall.DependsOn { - ref, diags := addrs.ParseRef(traversal) - if diags.HasErrors() { - // We ignore this here, because this isn't a suitable place to return - // errors. This situation should be caught and rejected during - // validation. - log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, diags.Err()) - continue - } - - refs = append(refs, ref) - } - - // Expansion only uses the count and for_each expressions, so this - // particular graph node only refers to those. - // Individual variable values in the module call definition might also - // refer to other objects, but that's handled by - // NodeApplyableModuleVariable. - // - // Because our Path method returns the module instance that contains - // our call, these references will be correctly interpreted as being - // in the calling module's namespace, not the namespaces of any of the - // child module instances we might expand to during our evaluation. - - if n.ModuleCall.Count != nil { - countRefs, _ := lang.ReferencesInExpr(n.ModuleCall.Count) - refs = append(refs, countRefs...) - } - if n.ModuleCall.ForEach != nil { - forEachRefs, _ := lang.ReferencesInExpr(n.ModuleCall.ForEach) - refs = append(refs, forEachRefs...) - } - return appendResourceDestroyReferences(refs) -} - -// GraphNodeReferenceOutside -func (n *nodeExpandModule) ReferenceOutside() (selfPath, referencePath addrs.Module) { - return n.Addr, n.Addr.Parent() -} - -// RemovableIfNotTargeted implementation -func (n *nodeExpandModule) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// GraphNodeEvalable -func (n *nodeExpandModule) EvalTree() EvalNode { - return &evalPrepareModuleExpansion{ - Addr: n.Addr, - Config: n.Config, - ModuleCall: n.ModuleCall, - } -} - -// nodeCloseModule represents an expanded module during apply, and is visited -// after all other module instance nodes. This node will depend on all module -// instance resource and outputs, and anything depending on the module should -// wait on this node. -// Besides providing a root node for dependency ordering, nodeCloseModule also -// cleans up state after all the module nodes have been evaluated, removing -// empty resources and modules from the state. 
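nodeExpandModule.References above deliberately tolerates unparseable depends_on traversals: it logs and skips them rather than failing, on the assumption that validation rejects them elsewhere. A small sketch of that error-tolerant loop follows, with parseRef as a crude placeholder for addrs.ParseRef.

package main

import (
	"fmt"
	"log"
	"strings"
)

// parseRef is a placeholder for addrs.ParseRef: it rejects traversals
// containing whitespace and accepts everything else.
func parseRef(traversal string) (string, error) {
	if strings.ContainsAny(traversal, " \t") {
		return "", fmt.Errorf("invalid reference %q", traversal)
	}
	return traversal, nil
}

// dependsOnRefs mirrors the loop above: log and skip anything unparseable,
// since this isn't a suitable place to return errors.
func dependsOnRefs(traversals []string) []string {
	var refs []string
	for _, t := range traversals {
		ref, err := parseRef(t)
		if err != nil {
			log.Printf("[ERROR] can't parse %q from depends_on: %s", t, err)
			continue
		}
		refs = append(refs, ref)
	}
	return refs
}

func main() {
	fmt.Println(dependsOnRefs([]string{"aws_instance.a", "bad ref"})) // [aws_instance.a]
}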
-type nodeCloseModule struct { - Addr addrs.Module -} - -var ( - _ GraphNodeReferenceable = (*nodeCloseModule)(nil) - _ GraphNodeReferenceOutside = (*nodeCloseModule)(nil) -) - -func (n *nodeCloseModule) ModulePath() addrs.Module { - return n.Addr -} - -func (n *nodeCloseModule) ReferenceOutside() (selfPath, referencePath addrs.Module) { - return n.Addr.Parent(), n.Addr -} - -func (n *nodeCloseModule) ReferenceableAddrs() []addrs.Referenceable { - _, call := n.Addr.Call() - return []addrs.Referenceable{ - call, - } -} - -func (n *nodeCloseModule) Name() string { - if len(n.Addr) == 0 { - return "root" - } - return n.Addr.String() + " (close)" -} - -// RemovableIfNotTargeted implementation -func (n *nodeCloseModule) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -func (n *nodeCloseModule) EvalTree() EvalNode { - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalOpFilter{ - Ops: []walkOperation{walkApply, walkDestroy}, - Node: &evalCloseModule{ - Addr: n.Addr, - }, - }, - }, - } -} - -type evalCloseModule struct { - Addr addrs.Module -} - -func (n *evalCloseModule) Eval(ctx EvalContext) (interface{}, error) { - // We need the full, locked state, because SyncState does not provide a way to - // transact over multiple module instances at the moment. - state := ctx.State().Lock() - defer ctx.State().Unlock() - - for modKey, mod := range state.Modules { - if !n.Addr.Equal(mod.Addr.Module()) { - continue - } - - // clean out any empty resources - for resKey, res := range mod.Resources { - if len(res.Instances) == 0 { - delete(mod.Resources, resKey) - } - } - - // empty child modules are always removed - if len(mod.Resources) == 0 && !mod.Addr.IsRoot() { - delete(state.Modules, modKey) - } - } - return nil, nil -} - -// evalPrepareModuleExpansion is an EvalNode implementation -// that sets the count or for_each on the instance expander -type evalPrepareModuleExpansion struct { - Addr addrs.Module - Config *configs.Module - ModuleCall *configs.ModuleCall -} - -func (n *evalPrepareModuleExpansion) Eval(ctx EvalContext) (interface{}, error) { - expander := ctx.InstanceExpander() - _, call := n.Addr.Call() - - // nodeExpandModule itself does not have visibility into how its ancestors - // were expanded, so we use the expander here to provide all possible paths - // to our module, and register module instances with each of them. - for _, module := range expander.ExpandModule(n.Addr.Parent()) { - ctx = ctx.WithPath(module) - - switch { - case n.ModuleCall.Count != nil: - count, diags := evaluateCountExpression(n.ModuleCall.Count, ctx) - if diags.HasErrors() { - return nil, diags.Err() - } - expander.SetModuleCount(module, call, count) - - case n.ModuleCall.ForEach != nil: - forEach, diags := evaluateForEachExpression(n.ModuleCall.ForEach, ctx) - if diags.HasErrors() { - return nil, diags.Err() - } - expander.SetModuleForEach(module, call, forEach) - - default: - expander.SetModuleSingle(module, call) - } - } - - return nil, nil -} - -// nodeValidateModule wraps a nodeExpand module for validation, ensuring that -// no expansion is attempted during evaluation, when count and for_each -// expressions may not be known. 
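evalCloseModule.Eval above prunes state once a module's nodes have all been evaluated: resources with no instances are removed first, and a non-root module left with no resources is then removed entirely. Here is a runnable sketch of that pruning over placeholder map types.

package main

import "fmt"

type resource struct{ instances int }

type module struct {
	isRoot    bool
	resources map[string]*resource
}

// pruneState mirrors evalCloseModule.Eval: drop resources with no instances,
// then drop any non-root module that has become empty.
func pruneState(modules map[string]*module) {
	for modKey, mod := range modules {
		for resKey, res := range mod.resources {
			if res.instances == 0 {
				delete(mod.resources, resKey)
			}
		}
		if len(mod.resources) == 0 && !mod.isRoot {
			delete(modules, modKey)
		}
	}
}

func main() {
	state := map[string]*module{
		"":           {isRoot: true, resources: map[string]*resource{}},
		"module.foo": {resources: map[string]*resource{"aws_instance.a": {instances: 0}}},
	}
	pruneState(state)
	fmt.Println(len(state)) // 1: only the (empty) root module survives
}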
-type nodeValidateModule struct { - nodeExpandModule -} - -// GraphNodeEvalable -func (n *nodeValidateModule) EvalTree() EvalNode { - return &evalValidateModule{ - Addr: n.Addr, - Config: n.Config, - ModuleCall: n.ModuleCall, - } -} - -type evalValidateModule struct { - Addr addrs.Module - Config *configs.Module - ModuleCall *configs.ModuleCall -} - -func (n *evalValidateModule) Eval(ctx EvalContext) (interface{}, error) { - _, call := n.Addr.Call() - expander := ctx.InstanceExpander() - - // Modules all evaluate to single instances during validation, only to - // create a proper context within which to evaluate. All parent modules - // will be a single instance, but still get our address in the expected - // manner anyway to ensure they've been registered correctly. - for _, module := range expander.ExpandModule(n.Addr.Parent()) { - ctx = ctx.WithPath(module) - - // Validate our for_each and count expressions at a basic level. - // We don't require known values here, because there will be unknown - // values before a full expansion; any resulting errors will be caught - // in later steps. - switch { - case n.ModuleCall.Count != nil: - _, diags := evaluateCountExpressionValue(n.ModuleCall.Count, ctx) - if diags.HasErrors() { - return nil, diags.Err() - } - - case n.ModuleCall.ForEach != nil: - _, diags := evaluateForEachExpressionValue(n.ModuleCall.ForEach, ctx) - if diags.HasErrors() { - return nil, diags.Err() - } - } - - // now set our own mode to single - expander.SetModuleSingle(module, call) - } - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go deleted file mode 100644 index a4574e92..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go +++ /dev/null @@ -1,225 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/lang" - "github.com/zclconf/go-cty/cty" -) - -// nodeExpandModuleVariable is the placeholder for a variable that has not yet had -// its module path expanded.
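Both evalPrepareModuleExpansion and evalValidateModule above hinge on the same three-way switch: a module call sets count, or for_each, or neither, in which case it expands to a single unkeyed instance. The sketch below mirrors that switch with plain Go types; register and its key-list return are illustrative stand-ins for the expander's SetModuleCount/SetModuleForEach/SetModuleSingle registrations.

package main

import "fmt"

// register mirrors the switch above: in a valid config at most one of
// count / for_each is set; with neither, the call expands to one instance
// with an empty key.
func register(count *int, forEach map[string]bool) []string {
	switch {
	case count != nil:
		keys := make([]string, 0, *count)
		for i := 0; i < *count; i++ {
			keys = append(keys, fmt.Sprintf("[%d]", i))
		}
		return keys
	case forEach != nil:
		var keys []string
		for k := range forEach {
			keys = append(keys, fmt.Sprintf("[%q]", k))
		}
		return keys
	default:
		return []string{""}
	}
}

func main() {
	two := 2
	fmt.Println(register(&two, nil))                       // [[0] [1]]
	fmt.Println(register(nil, map[string]bool{"a": true})) // [["a"]]
	fmt.Println(register(nil, nil))                        // one unkeyed instance
}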
-type nodeExpandModuleVariable struct { - Addr addrs.InputVariable - Module addrs.Module - Config *configs.Variable - Expr hcl.Expression -} - -var ( - _ GraphNodeDynamicExpandable = (*nodeExpandModuleVariable)(nil) - _ GraphNodeReferenceOutside = (*nodeExpandModuleVariable)(nil) - _ GraphNodeReferenceable = (*nodeExpandModuleVariable)(nil) - _ GraphNodeReferencer = (*nodeExpandModuleVariable)(nil) - _ graphNodeTemporaryValue = (*nodeExpandModuleVariable)(nil) - _ RemovableIfNotTargeted = (*nodeExpandModuleVariable)(nil) - _ graphNodeExpandsInstances = (*nodeExpandModuleVariable)(nil) -) - -func (n *nodeExpandModuleVariable) expandsInstances() {} - -func (n *nodeExpandModuleVariable) temporaryValue() bool { - return true -} - -func (n *nodeExpandModuleVariable) DynamicExpand(ctx EvalContext) (*Graph, error) { - var g Graph - expander := ctx.InstanceExpander() - for _, module := range expander.ExpandModule(n.Module) { - o := &nodeModuleVariable{ - Addr: n.Addr.Absolute(module), - Config: n.Config, - Expr: n.Expr, - ModuleInstance: module, - } - g.Add(o) - } - return &g, nil -} - -func (n *nodeExpandModuleVariable) Name() string { - return fmt.Sprintf("%s.%s (expand)", n.Module, n.Addr.String()) -} - -// GraphNodeModulePath -func (n *nodeExpandModuleVariable) ModulePath() addrs.Module { - return n.Module -} - -// GraphNodeReferencer -func (n *nodeExpandModuleVariable) References() []*addrs.Reference { - - // If we have no value expression, we cannot depend on anything. - if n.Expr == nil { - return nil - } - - // Variables in the root don't depend on anything, because their values - // are gathered prior to the graph walk and recorded in the context. - if len(n.Module) == 0 { - return nil - } - - // Otherwise, we depend on anything referenced by our value expression. - // We ignore diagnostics here under the assumption that we'll re-eval - // all these things later and catch them then; for our purposes here, - // we only care about valid references. - // - // Due to our GraphNodeReferenceOutside implementation, the addresses - // returned by this function are interpreted in the _parent_ module from - // where our associated variable was declared, which is correct because - // our value expression is assigned within a "module" block in the parent - // module. - refs, _ := lang.ReferencesInExpr(n.Expr) - return refs -} - -// GraphNodeReferenceOutside implementation -func (n *nodeExpandModuleVariable) ReferenceOutside() (selfPath, referencePath addrs.Module) { - return n.Module, n.Module.Parent() -} - -// GraphNodeReferenceable -func (n *nodeExpandModuleVariable) ReferenceableAddrs() []addrs.Referenceable { - // FIXME: References for module variables probably need to be thought out a bit more - // Otherwise, we can reference the output via the address itself, or the - // module call - _, call := n.Module.Call() - return []addrs.Referenceable{n.Addr, call} -} - -// RemovableIfNotTargeted -func (n *nodeExpandModuleVariable) RemoveIfNotTargeted() bool { - return true -} - -// GraphNodeTargetDownstream -func (n *nodeExpandModuleVariable) TargetDownstream(targetedDeps, untargetedDeps dag.Set) bool { - return true -} - -// nodeModuleVariable represents a module variable input during -// the apply step. 
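nodeExpandModuleVariable.ReferenceOutside above declares that the node lives in its own module while its references resolve in the parent, because the value expression is written inside the calling module block. A toy sketch of that self/reference split, using dotted strings instead of addrs.Module:

package main

import (
	"fmt"
	"strings"
)

// parent trims the last call segment from a dotted module path, e.g.
// "module.network.module.subnets" -> "module.network"; a top-level module's
// parent is the root (empty string).
func parent(module string) string {
	i := strings.LastIndex(module, ".module.")
	if i < 0 {
		return ""
	}
	return module[:i]
}

// referenceOutside mirrors the method above: the node belongs to its own
// module, but its references are resolved in the parent.
func referenceOutside(module string) (selfPath, referencePath string) {
	return module, parent(module)
}

func main() {
	self, ref := referenceOutside("module.network.module.subnets")
	fmt.Printf("self=%s references=%s\n", self, ref)
}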
-type nodeModuleVariable struct { - Addr addrs.AbsInputVariableInstance - Config *configs.Variable // Config is the var in the config - Expr hcl.Expression // Expr is the value expression given in the call - // ModuleInstance in order to create the appropriate context for evaluating - // ModuleCallArguments, ex. so count.index and each.key can resolve - ModuleInstance addrs.ModuleInstance -} - -// Ensure that we are implementing all of the interfaces we think we are -// implementing. -var ( - _ GraphNodeModuleInstance = (*nodeModuleVariable)(nil) - _ RemovableIfNotTargeted = (*nodeModuleVariable)(nil) - _ GraphNodeEvalable = (*nodeModuleVariable)(nil) - _ graphNodeTemporaryValue = (*nodeModuleVariable)(nil) - _ dag.GraphNodeDotter = (*nodeModuleVariable)(nil) -) - -func (n *nodeModuleVariable) temporaryValue() bool { - return true -} - -func (n *nodeModuleVariable) Name() string { - return n.Addr.String() -} - -// GraphNodeModuleInstance -func (n *nodeModuleVariable) Path() addrs.ModuleInstance { - // We execute in the parent scope (above our own module) because - // expressions in our value are resolved in that context. - return n.Addr.Module.Parent() -} - -// GraphNodeModulePath -func (n *nodeModuleVariable) ModulePath() addrs.Module { - return n.Addr.Module.Parent().Module() -} - -// RemovableIfNotTargeted -func (n *nodeModuleVariable) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// GraphNodeEvalable -func (n *nodeModuleVariable) EvalTree() EvalNode { - // If we have no value, do nothing - if n.Expr == nil { - return &EvalNoop{} - } - - // Otherwise, interpolate the value of this variable and set it - // within the variables mapping. - vals := make(map[string]cty.Value) - - _, call := n.Addr.Module.CallInstance() - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, - walkDestroy}, - Node: &EvalModuleCallArgument{ - Addr: n.Addr.Variable, - Config: n.Config, - Expr: n.Expr, - ModuleInstance: n.ModuleInstance, - Values: vals, - }, - }, - - &EvalOpFilter{ - Ops: []walkOperation{walkValidate}, - Node: &EvalModuleCallArgument{ - Addr: n.Addr.Variable, - Config: n.Config, - Expr: n.Expr, - ModuleInstance: n.ModuleInstance, - Values: vals, - validateOnly: true, - }, - }, - - &EvalSetModuleCallArguments{ - Module: call, - Values: vals, - }, - - &evalVariableValidations{ - Addr: n.Addr, - Config: n.Config, - Expr: n.Expr, - }, - }, - } -} - -// dag.GraphNodeDotter impl. -func (n *nodeModuleVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go deleted file mode 100644 index 729710f2..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_output.go +++ /dev/null @@ -1,302 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/lang" -) - -// nodeExpandOutput is the placeholder for an output that has not yet had -// its module path expanded. 
-type nodeExpandOutput struct { - Addr addrs.OutputValue - Module addrs.Module - Config *configs.Output -} - -var ( - _ RemovableIfNotTargeted = (*nodeExpandOutput)(nil) - _ GraphNodeReferenceable = (*nodeExpandOutput)(nil) - _ GraphNodeReferencer = (*nodeExpandOutput)(nil) - _ GraphNodeDynamicExpandable = (*nodeExpandOutput)(nil) - _ graphNodeTemporaryValue = (*nodeExpandOutput)(nil) - _ graphNodeExpandsInstances = (*nodeExpandOutput)(nil) -) - -func (n *nodeExpandOutput) expandsInstances() {} - -func (n *nodeExpandOutput) temporaryValue() bool { - // this must always be evaluated if it is a root module output - return !n.Module.IsRoot() -} - -func (n *nodeExpandOutput) DynamicExpand(ctx EvalContext) (*Graph, error) { - var g Graph - expander := ctx.InstanceExpander() - for _, module := range expander.ExpandModule(n.Module) { - o := &NodeApplyableOutput{ - Addr: n.Addr.Absolute(module), - Config: n.Config, - } - log.Printf("[TRACE] Expanding output: adding %s as %T", o.Addr.String(), o) - g.Add(o) - } - return &g, nil -} - -func (n *nodeExpandOutput) Name() string { - path := n.Module.String() - addr := n.Addr.String() + " (expand)" - if path != "" { - return path + "." + addr - } - return addr -} - -// GraphNodeModulePath -func (n *nodeExpandOutput) ModulePath() addrs.Module { - return n.Module -} - -// GraphNodeReferenceable -func (n *nodeExpandOutput) ReferenceableAddrs() []addrs.Referenceable { - // An output in the root module can't be referenced at all. - if n.Module.IsRoot() { - return nil - } - - // the output is referenced through the module call, and via the - // module itself. - _, call := n.Module.Call() - callOutput := addrs.ModuleCallOutput{ - Call: call, - Name: n.Addr.Name, - } - - // Otherwise, we can reference the output via the - // module call itself - return []addrs.Referenceable{call, callOutput} -} - -// GraphNodeReferenceOutside implementation -func (n *nodeExpandOutput) ReferenceOutside() (selfPath, referencePath addrs.Module) { - // Output values have their expressions resolved in the context of the - // module where they are defined. - referencePath = n.Module - - // ...but they are referenced in the context of their calling module. - selfPath = referencePath.Parent() - - return // uses named return values -} - -// GraphNodeReferencer -func (n *nodeExpandOutput) References() []*addrs.Reference { - return appendResourceDestroyReferences(referencesForOutput(n.Config)) -} - -// RemovableIfNotTargeted -func (n *nodeExpandOutput) RemoveIfNotTargeted() bool { - return true -} - -// GraphNodeTargetDownstream -func (n *nodeExpandOutput) TargetDownstream(targetedDeps, untargetedDeps dag.Set) bool { - return true -} - -// NodeApplyableOutput represents an output that is "applyable": -// it is ready to be applied. 
-type NodeApplyableOutput struct { - Addr addrs.AbsOutputValue - Config *configs.Output // Config is the output in the config -} - -var ( - _ GraphNodeModuleInstance = (*NodeApplyableOutput)(nil) - _ RemovableIfNotTargeted = (*NodeApplyableOutput)(nil) - _ GraphNodeTargetDownstream = (*NodeApplyableOutput)(nil) - _ GraphNodeReferenceable = (*NodeApplyableOutput)(nil) - _ GraphNodeReferencer = (*NodeApplyableOutput)(nil) - _ GraphNodeReferenceOutside = (*NodeApplyableOutput)(nil) - _ GraphNodeEvalable = (*NodeApplyableOutput)(nil) - _ graphNodeTemporaryValue = (*NodeApplyableOutput)(nil) - _ dag.GraphNodeDotter = (*NodeApplyableOutput)(nil) -) - -func (n *NodeApplyableOutput) temporaryValue() bool { - // this must always be evaluated if it is a root module output - return !n.Addr.Module.IsRoot() -} - -func (n *NodeApplyableOutput) Name() string { - return n.Addr.String() -} - -// GraphNodeModuleInstance -func (n *NodeApplyableOutput) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeModulePath -func (n *NodeApplyableOutput) ModulePath() addrs.Module { - return n.Addr.Module.Module() -} - -// RemovableIfNotTargeted -func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// GraphNodeTargetDownstream -func (n *NodeApplyableOutput) TargetDownstream(targetedDeps, untargetedDeps dag.Set) bool { - // If any of the direct dependencies of an output are targeted then - // the output must always be targeted as well, so its value will always - // be up-to-date at the completion of an apply walk. - return true -} - -func referenceOutsideForOutput(addr addrs.AbsOutputValue) (selfPath, referencePath addrs.Module) { - // Output values have their expressions resolved in the context of the - // module where they are defined. - referencePath = addr.Module.Module() - - // ...but they are referenced in the context of their calling module. - selfPath = addr.Module.Parent().Module() - - return // uses named return values -} - -// GraphNodeReferenceOutside implementation -func (n *NodeApplyableOutput) ReferenceOutside() (selfPath, referencePath addrs.Module) { - return referenceOutsideForOutput(n.Addr) -} - -func referenceableAddrsForOutput(addr addrs.AbsOutputValue) []addrs.Referenceable { - // An output in the root module can't be referenced at all. - if addr.Module.IsRoot() { - return nil - } - - // Otherwise, we can be referenced via a reference to our output name - // on the parent module's call, or via a reference to the entire call. - // e.g. module.foo.bar or just module.foo . - // Note that our ReferenceOutside method causes these addresses to be - // relative to the calling module, not the module where the output - // was declared. - _, outp := addr.ModuleCallOutput() - _, call := addr.Module.CallInstance() - - return []addrs.Referenceable{outp, call} -} - -// GraphNodeReferenceable -func (n *NodeApplyableOutput) ReferenceableAddrs() []addrs.Referenceable { - return referenceableAddrsForOutput(n.Addr) -} - -func referencesForOutput(c *configs.Output) []*addrs.Reference { - impRefs, _ := lang.ReferencesInExpr(c.Expr) - expRefs, _ := lang.References(c.DependsOn) - l := len(impRefs) + len(expRefs) - if l == 0 { - return nil - } - refs := make([]*addrs.Reference, 0, l) - refs = append(refs, impRefs...) - refs = append(refs, expRefs...) 
- return refs - -} - -// GraphNodeReferencer -func (n *NodeApplyableOutput) References() []*addrs.Reference { - return appendResourceDestroyReferences(referencesForOutput(n.Config)) -} - -// GraphNodeEvalable -func (n *NodeApplyableOutput) EvalTree() EvalNode { - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalOpFilter{ - Ops: []walkOperation{walkEval, walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy}, - Node: &EvalWriteOutput{ - Addr: n.Addr.OutputValue, - Sensitive: n.Config.Sensitive, - Expr: n.Config.Expr, - }, - }, - }, - } -} - -// dag.GraphNodeDotter impl. -func (n *NodeApplyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} - -// NodeDestroyableOutput represents an output that is "destroyable": -// its application will remove the output from the state. -type NodeDestroyableOutput struct { - Addr addrs.AbsOutputValue - Config *configs.Output // Config is the output in the config -} - -var ( - _ RemovableIfNotTargeted = (*NodeDestroyableOutput)(nil) - _ GraphNodeTargetDownstream = (*NodeDestroyableOutput)(nil) - _ GraphNodeEvalable = (*NodeDestroyableOutput)(nil) - _ dag.GraphNodeDotter = (*NodeDestroyableOutput)(nil) -) - -func (n *NodeDestroyableOutput) Name() string { - return fmt.Sprintf("%s (destroy)", n.Addr.String()) -} - -// GraphNodeModulePath -func (n *NodeDestroyableOutput) ModulePath() addrs.Module { - return n.Addr.Module.Module() -} - -// RemovableIfNotTargeted -func (n *NodeDestroyableOutput) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// This will keep the destroy node in the graph if its corresponding output -// node is also in the destroy graph. -func (n *NodeDestroyableOutput) TargetDownstream(targetedDeps, untargetedDeps dag.Set) bool { - return true -} - -// GraphNodeEvalable -func (n *NodeDestroyableOutput) EvalTree() EvalNode { - return &EvalDeleteOutput{ - Addr: n.Addr, - } -} - -// dag.GraphNodeDotter impl. -func (n *NodeDestroyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go deleted file mode 100644 index 2071ab16..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go +++ /dev/null @@ -1,11 +0,0 @@ -package terraform - -// NodeApplyableProvider represents a provider during an apply.
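referencesForOutput above merges implicit references from the output's expression with explicit ones from depends_on, preallocating the slice and returning nil when both are empty. The same shape, with placeholder string references instead of *addrs.Reference:

package main

import "fmt"

// mergeRefs mirrors referencesForOutput: nil when there is nothing to
// reference, otherwise one preallocated slice holding both kinds.
func mergeRefs(implicit, explicit []string) []string {
	l := len(implicit) + len(explicit)
	if l == 0 {
		return nil // avoid allocating for the common empty case
	}
	refs := make([]string, 0, l)
	refs = append(refs, implicit...)
	refs = append(refs, explicit...)
	return refs
}

func main() {
	fmt.Println(mergeRefs([]string{"aws_instance.a"}, []string{"module.net"}))
}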
-type NodeApplyableProvider struct { - *NodeAbstractProvider -} - -// GraphNodeEvalable -func (n *NodeApplyableProvider) EvalTree() EvalNode { - return ProviderEvalTree(n, n.ProviderConfig()) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go deleted file mode 100644 index 5c30364e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go +++ /dev/null @@ -1,103 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - - "github.com/hashicorp/terraform/dag" -) - -// ConcreteProviderNodeFunc is a callback type used to convert an -// abstract provider to a concrete one of some type. -type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex - -// NodeAbstractProvider represents a provider that has no associated operations. -// It registers all the common interfaces across operations for providers. -type NodeAbstractProvider struct { - Addr addrs.AbsProviderConfig - - // The fields below will be automatically set using the Attach - // interfaces if you're running those transforms, but also be explicitly - // set if you already have that information. - - Config *configs.Provider - Schema *configschema.Block -} - -var ( - _ GraphNodeModulePath = (*NodeAbstractProvider)(nil) - _ RemovableIfNotTargeted = (*NodeAbstractProvider)(nil) - _ GraphNodeReferencer = (*NodeAbstractProvider)(nil) - _ GraphNodeProvider = (*NodeAbstractProvider)(nil) - _ GraphNodeAttachProvider = (*NodeAbstractProvider)(nil) - _ GraphNodeAttachProviderConfigSchema = (*NodeAbstractProvider)(nil) - _ dag.GraphNodeDotter = (*NodeAbstractProvider)(nil) -) - -func (n *NodeAbstractProvider) Name() string { - return n.Addr.String() -} - -// GraphNodeModuleInstance -func (n *NodeAbstractProvider) Path() addrs.ModuleInstance { - // Providers cannot be contained inside an expanded module, so this shim - // converts our module path to the correct ModuleInstance. - return n.Addr.Module.UnkeyedInstanceShim() -} - -// GraphNodeModulePath -func (n *NodeAbstractProvider) ModulePath() addrs.Module { - return n.Addr.Module -} - -// RemovableIfNotTargeted -func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// GraphNodeReferencer -func (n *NodeAbstractProvider) References() []*addrs.Reference { - if n.Config == nil || n.Schema == nil { - return nil - } - - return ReferencesFromConfig(n.Config.Config, n.Schema) -} - -// GraphNodeProvider -func (n *NodeAbstractProvider) ProviderAddr() addrs.AbsProviderConfig { - return n.Addr -} - -// GraphNodeProvider -func (n *NodeAbstractProvider) ProviderConfig() *configs.Provider { - if n.Config == nil { - return nil - } - - return n.Config -} - -// GraphNodeAttachProvider -func (n *NodeAbstractProvider) AttachProvider(c *configs.Provider) { - n.Config = c -} - -// GraphNodeAttachProviderConfigSchema impl. -func (n *NodeAbstractProvider) AttachProviderConfigSchema(schema *configschema.Block) { - n.Schema = schema -} - -// GraphNodeDotter impl. 
-func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "diamond", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go deleted file mode 100644 index 6f7a6d27..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go +++ /dev/null @@ -1,27 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/dag" -) - -// NodeDisabledProvider represents a provider that is disabled. A disabled -// provider does nothing. It exists to properly set inheritance information -// for child providers. -type NodeDisabledProvider struct { - *NodeAbstractProvider -} - -var ( - _ GraphNodeModulePath = (*NodeDisabledProvider)(nil) - _ RemovableIfNotTargeted = (*NodeDisabledProvider)(nil) - _ GraphNodeReferencer = (*NodeDisabledProvider)(nil) - _ GraphNodeProvider = (*NodeDisabledProvider)(nil) - _ GraphNodeAttachProvider = (*NodeDisabledProvider)(nil) - _ dag.GraphNodeDotter = (*NodeDisabledProvider)(nil) -) - -func (n *NodeDisabledProvider) Name() string { - return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name()) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go deleted file mode 100644 index 4814d1fa..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go +++ /dev/null @@ -1,18 +0,0 @@ -package terraform - -// NodeEvalableProvider represents a provider during an "eval" walk. -// This special provider node type just initializes a provider and -// fetches its schema, without configuring it or otherwise interacting -// with it. -type NodeEvalableProvider struct { - *NodeAbstractProvider -} - -// GraphNodeEvalable -func (n *NodeEvalableProvider) EvalTree() EvalNode { - addr := n.Addr - - return &EvalInitProvider{ - Addr: addr, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go deleted file mode 100644 index 1160498a..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" -) - -// NodeProvisioner represents a provisioner that has no associated operations. -// It registers all the common interfaces across operations for provisioners. -type NodeProvisioner struct { - NameValue string - PathValue addrs.ModuleInstance -} - -var ( - _ GraphNodeModuleInstance = (*NodeProvisioner)(nil) - _ GraphNodeProvisioner = (*NodeProvisioner)(nil) - _ GraphNodeEvalable = (*NodeProvisioner)(nil) -) - -func (n *NodeProvisioner) Name() string { - result := fmt.Sprintf("provisioner.%s", n.NameValue) - if len(n.PathValue) > 0 { - result = fmt.Sprintf("%s.%s", n.PathValue.String(), result) - } - - return result -} - -// GraphNodeModuleInstance -func (n *NodeProvisioner) Path() addrs.ModuleInstance { - return n.PathValue -} - -// GraphNodeProvisioner -func (n *NodeProvisioner) ProvisionerName() string { - return n.NameValue -} - -// GraphNodeEvalable impl.
-func (n *NodeProvisioner) EvalTree() EvalNode { - return &EvalInitProvisioner{Name: n.NameValue} -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go deleted file mode 100644 index 11eca0d9..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go +++ /dev/null @@ -1,439 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/lang" - "github.com/hashicorp/terraform/states" -) - -// ConcreteResourceNodeFunc is a callback type used to convert an -// abstract resource to a concrete one of some type. -type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex - -// GraphNodeConfigResource is implemented by any nodes that represent a resource. -// The type of operation cannot be assumed, only that this node represents -// the given resource. -type GraphNodeConfigResource interface { - ResourceAddr() addrs.ConfigResource -} - -// ConcreteResourceInstanceNodeFunc is a callback type used to convert an -// abstract resource instance to a concrete one of some type. -type ConcreteResourceInstanceNodeFunc func(*NodeAbstractResourceInstance) dag.Vertex - -// GraphNodeResourceInstance is implemented by any nodes that represent -// a resource instance. A single resource may have multiple instances if, -// for example, the "count" or "for_each" argument is used for it in -// configuration. -type GraphNodeResourceInstance interface { - ResourceInstanceAddr() addrs.AbsResourceInstance - - // StateDependencies returns any inter-resource dependencies that are - // stored in the state. - StateDependencies() []addrs.ConfigResource -} - -// NodeAbstractResource represents a resource that has no associated -// operations. It registers all the interfaces for a resource that are common -// across multiple operation types. -type NodeAbstractResource struct { - Addr addrs.ConfigResource - - // The fields below will be automatically set using the Attach - // interfaces if you're running those transforms, but may also be explicitly - // set if you already have that information.
- - Schema *configschema.Block // Schema for processing the configuration body - SchemaVersion uint64 // Schema version of "Schema", as decided by the provider - Config *configs.Resource // Config is the resource in the config - - // ProviderMetas is the provider_meta configs for the module this resource belongs to - ProviderMetas map[addrs.Provider]*configs.ProviderMeta - - ProvisionerSchemas map[string]*configschema.Block - - // Set from GraphNodeTargetable - Targets []addrs.Targetable - - // Set from GraphNodeDependsOn - dependsOn []addrs.ConfigResource - - // The address of the provider this resource will use - ResolvedProvider addrs.AbsProviderConfig -} - -var ( - _ GraphNodeReferenceable = (*NodeAbstractResource)(nil) - _ GraphNodeReferencer = (*NodeAbstractResource)(nil) - _ GraphNodeProviderConsumer = (*NodeAbstractResource)(nil) - _ GraphNodeProvisionerConsumer = (*NodeAbstractResource)(nil) - _ GraphNodeConfigResource = (*NodeAbstractResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeAbstractResource)(nil) - _ GraphNodeAttachResourceSchema = (*NodeAbstractResource)(nil) - _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResource)(nil) - _ GraphNodeAttachProviderMetaConfigs = (*NodeAbstractResource)(nil) - _ GraphNodeTargetable = (*NodeAbstractResource)(nil) - _ graphNodeAttachResourceDependencies = (*NodeAbstractResource)(nil) - _ dag.GraphNodeDotter = (*NodeAbstractResource)(nil) -) - -// NewNodeAbstractResource creates an abstract resource graph node for -// the given absolute resource address. -func NewNodeAbstractResource(addr addrs.ConfigResource) *NodeAbstractResource { - return &NodeAbstractResource{ - Addr: addr, - } -} - -// NodeAbstractResourceInstance represents a resource instance with no -// associated operations. It embeds NodeAbstractResource but additionally -// contains an instance key, used to identify one of potentially many -// instances that were created from a resource in configuration, e.g. using -// the "count" or "for_each" arguments. -type NodeAbstractResourceInstance struct { - NodeAbstractResource - Addr addrs.AbsResourceInstance - - // The fields below will be automatically set using the Attach - // interfaces if you're running those transforms, but also be explicitly - // set if you already have that information. - ResourceState *states.Resource - Dependencies []addrs.ConfigResource -} - -var ( - _ GraphNodeModuleInstance = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeProviderConsumer = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeProvisionerConsumer = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeConfigResource = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachResourceSchema = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachProviderMetaConfigs = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeTargetable = (*NodeAbstractResourceInstance)(nil) - _ dag.GraphNodeDotter = (*NodeAbstractResourceInstance)(nil) -) - -// NewNodeAbstractResourceInstance creates an abstract resource instance graph -// node for the given absolute resource instance address. 
-func NewNodeAbstractResourceInstance(addr addrs.AbsResourceInstance) *NodeAbstractResourceInstance { - // Due to the fact that we embed NodeAbstractResource, the given address - // actually ends up split between the resource address in the embedded - // object and the InstanceKey field in our own struct. The - // ResourceInstanceAddr method will stick these back together again on - // request. - r := NewNodeAbstractResource(addr.ContainingResource().Config()) - return &NodeAbstractResourceInstance{ - NodeAbstractResource: *r, - Addr: addr, - } -} - -func (n *NodeAbstractResource) Name() string { - return n.ResourceAddr().String() -} - -func (n *NodeAbstractResourceInstance) Name() string { - return n.ResourceInstanceAddr().String() -} - -func (n *NodeAbstractResourceInstance) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeModulePath -func (n *NodeAbstractResource) ModulePath() addrs.Module { - return n.Addr.Module -} - -// GraphNodeReferenceable -func (n *NodeAbstractResource) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr.Resource} -} - -// GraphNodeReferenceable -func (n *NodeAbstractResourceInstance) ReferenceableAddrs() []addrs.Referenceable { - addr := n.ResourceInstanceAddr() - return []addrs.Referenceable{ - addr.Resource, - - // A resource instance can also be referenced by the address of its - // containing resource, so that e.g. a reference to aws_instance.foo - // would match both aws_instance.foo[0] and aws_instance.foo[1]. - addr.ContainingResource().Resource, - } -} - -// GraphNodeReferencer -func (n *NodeAbstractResource) References() []*addrs.Reference { - // If we have a config then we prefer to use that. - if c := n.Config; c != nil { - var result []*addrs.Reference - - result = append(result, n.DependsOn()...) - - if n.Schema == nil { - // Should never happen, but we'll log if it does so that we can - // see this easily when debugging. - log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name()) - } - - refs, _ := lang.ReferencesInExpr(c.Count) - result = append(result, refs...) - refs, _ = lang.ReferencesInExpr(c.ForEach) - result = append(result, refs...) - - // ReferencesInBlock() requires a schema - if n.Schema != nil { - refs, _ = lang.ReferencesInBlock(c.Config, n.Schema) - } - - result = append(result, refs...) - if c.Managed != nil { - if c.Managed.Connection != nil { - refs, _ = lang.ReferencesInBlock(c.Managed.Connection.Config, connectionBlockSupersetSchema) - result = append(result, refs...) - } - - for _, p := range c.Managed.Provisioners { - if p.When != configs.ProvisionerWhenCreate { - continue - } - if p.Connection != nil { - refs, _ = lang.ReferencesInBlock(p.Connection.Config, connectionBlockSupersetSchema) - result = append(result, refs...) - } - - schema := n.ProvisionerSchemas[p.Type] - if schema == nil { - log.Printf("[WARN] no schema for provisioner %q is attached to %s, so provisioner block references cannot be detected", p.Type, n.Name()) - } - refs, _ = lang.ReferencesInBlock(p.Config, schema) - result = append(result, refs...) - } - } - return result - } - - // Otherwise, we have no references. - return nil -} - -func (n *NodeAbstractResource) DependsOn() []*addrs.Reference { - var result []*addrs.Reference - if c := n.Config; c != nil { - - for _, traversal := range c.DependsOn { - ref, diags := addrs.ParseRef(traversal) - if diags.HasErrors() { - // We ignore this here, because this isn't a suitable place to return - // errors. 
This situation should be caught and rejected during - // validation. - log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, diags.Err()) - continue - } - - result = append(result, ref) - } - } - return result -} - -// GraphNodeReferencer -func (n *NodeAbstractResourceInstance) References() []*addrs.Reference { - // If we have a configuration attached then we'll delegate to our - // embedded abstract resource, which knows how to extract dependencies - // from configuration. If there is no config, then the dependencies will - // be connected during destroy from those stored in the state. - if n.Config != nil { - if n.Schema == nil { - // We'll produce a log message about this out here so that - // we can include the full instance address, since the equivalent - // message in NodeAbstractResource.References cannot see it. - log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name()) - return nil - } - return n.NodeAbstractResource.References() - } - - // If we have neither config nor state then we have no references. - return nil -} - -// converts an instance address to the legacy dotted notation -func dottedInstanceAddr(tr addrs.ResourceInstance) string { - // The legacy state format uses dot-separated instance keys, - // rather than bracketed as in our modern syntax. - var suffix string - switch tk := tr.Key.(type) { - case addrs.IntKey: - suffix = fmt.Sprintf(".%d", int(tk)) - case addrs.StringKey: - suffix = fmt.Sprintf(".%s", string(tk)) - } - return tr.Resource.String() + suffix -} - -// StateDependencies returns the dependencies saved in the state. -func (n *NodeAbstractResourceInstance) StateDependencies() []addrs.ConfigResource { - if rs := n.ResourceState; rs != nil { - if s := rs.Instance(n.Addr.Resource.Key); s != nil { - if s.Current != nil { - return s.Current.Dependencies - } - } - } - - return nil -} - -func (n *NodeAbstractResource) SetProvider(p addrs.AbsProviderConfig) { - n.ResolvedProvider = p -} - -// GraphNodeProviderConsumer -func (n *NodeAbstractResource) ProvidedBy() (addrs.ProviderConfig, bool) { - // If we have a config we prefer that above all else - if n.Config != nil { - relAddr := n.Config.ProviderConfigAddr() - return addrs.LocalProviderConfig{ - LocalName: relAddr.LocalName, - Alias: relAddr.Alias, - }, false - } - - // No provider configuration found; return a default address - return addrs.AbsProviderConfig{ - Provider: n.Provider(), - Module: n.ModulePath(), - }, false -} - -// GraphNodeProviderConsumer -func (n *NodeAbstractResource) Provider() addrs.Provider { - if n.Config != nil { - return n.Config.Provider - } - return addrs.ImpliedProviderForUnqualifiedType(n.Addr.Resource.ImpliedProvider()) -} - -// GraphNodeProviderConsumer -func (n *NodeAbstractResourceInstance) ProvidedBy() (addrs.ProviderConfig, bool) { - // If we have a config we prefer that above all else - if n.Config != nil { - relAddr := n.Config.ProviderConfigAddr() - return addrs.LocalProviderConfig{ - LocalName: relAddr.LocalName, - Alias: relAddr.Alias, - }, false - } - - // If we have state, then we will use the provider from there - if n.ResourceState != nil { - // An address from the state must match exactly, since we must ensure - // we refresh/destroy a resource with the same provider configuration - // that created it. 
- return n.ResourceState.ProviderConfig, true - } - - // No provider configuration found; return a default address - return addrs.AbsProviderConfig{ - Provider: n.Provider(), - Module: n.ModulePath(), - }, false -} - -// GraphNodeProviderConsumer -func (n *NodeAbstractResourceInstance) Provider() addrs.Provider { - if n.Config != nil { - return n.Config.Provider - } - return addrs.NewDefaultProvider(n.Addr.Resource.ContainingResource().ImpliedProvider()) -} - -// GraphNodeProvisionerConsumer -func (n *NodeAbstractResource) ProvisionedBy() []string { - // If we have no configuration, then we have no provisioners - if n.Config == nil || n.Config.Managed == nil { - return nil - } - - // Build the list of provisioners we need based on the configuration. - // It is okay to have duplicates here. - result := make([]string, len(n.Config.Managed.Provisioners)) - for i, p := range n.Config.Managed.Provisioners { - result[i] = p.Type - } - - return result -} - -// GraphNodeProvisionerConsumer -func (n *NodeAbstractResource) AttachProvisionerSchema(name string, schema *configschema.Block) { - if n.ProvisionerSchemas == nil { - n.ProvisionerSchemas = make(map[string]*configschema.Block) - } - n.ProvisionerSchemas[name] = schema -} - -// GraphNodeResource -func (n *NodeAbstractResource) ResourceAddr() addrs.ConfigResource { - return n.Addr -} - -// GraphNodeResourceInstance -func (n *NodeAbstractResourceInstance) ResourceInstanceAddr() addrs.AbsResourceInstance { - return n.Addr -} - -// GraphNodeTargetable -func (n *NodeAbstractResource) SetTargets(targets []addrs.Targetable) { - n.Targets = targets -} - -// graphNodeAttachResourceDependencies -func (n *NodeAbstractResource) AttachResourceDependencies(deps []addrs.ConfigResource) { - n.dependsOn = deps -} - -// GraphNodeAttachResourceState -func (n *NodeAbstractResourceInstance) AttachResourceState(s *states.Resource) { - n.ResourceState = s -} - -// GraphNodeAttachResourceConfig -func (n *NodeAbstractResource) AttachResourceConfig(c *configs.Resource) { - n.Config = c -} - -// GraphNodeAttachResourceSchema impl -func (n *NodeAbstractResource) AttachResourceSchema(schema *configschema.Block, version uint64) { - n.Schema = schema - n.SchemaVersion = version -} - -// GraphNodeAttachProviderMetaConfigs impl -func (n *NodeAbstractResource) AttachProviderMetaConfigs(c map[addrs.Provider]*configs.ProviderMeta) { - n.ProviderMetas = c -} - -// GraphNodeDotter impl. -func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "box", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go deleted file mode 100644 index d02dc09c..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go +++ /dev/null @@ -1,115 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/lang" -) - -// nodeExpandApplyableResource handles the first layer of resource -// expansion during apply. Even though the resource instances themselves are -// already expanded from the plan, we still need to expand the -// NodeApplyableResource nodes into their respective modules. 
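The two ProvidedBy implementations above resolve a provider with a fixed precedence: explicit configuration first, then (for instances) the exact provider recorded in state, finally an implied default; the boolean result marks whether the address must match exactly. A condensed sketch of that precedence with placeholder types:

package main

import "fmt"

// providerAddr is a placeholder for the (addrs.ProviderConfig, bool) pair
// returned by ProvidedBy; exact reports whether the address must match the
// configuration that created the resource.
type providerAddr struct {
	name  string
	exact bool
}

func providedBy(config, state *string, implied string) providerAddr {
	switch {
	case config != nil:
		// If we have a config we prefer that above all else.
		return providerAddr{name: *config}
	case state != nil:
		// An address from the state must match exactly, so the resource is
		// refreshed/destroyed with the provider configuration that created it.
		return providerAddr{name: *state, exact: true}
	default:
		// No provider configuration found; fall back to a default address.
		return providerAddr{name: implied}
	}
}

func main() {
	fromState := `provider["registry.terraform.io/hashicorp/aws"].west`
	fmt.Println(providedBy(nil, &fromState, "hashicorp/aws"))
}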
-type nodeExpandApplyableResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeDynamicExpandable = (*nodeExpandApplyableResource)(nil) - _ GraphNodeReferenceable = (*nodeExpandApplyableResource)(nil) - _ GraphNodeReferencer = (*nodeExpandApplyableResource)(nil) - _ GraphNodeConfigResource = (*nodeExpandApplyableResource)(nil) - _ GraphNodeAttachResourceConfig = (*nodeExpandApplyableResource)(nil) - _ graphNodeExpandsInstances = (*nodeExpandApplyableResource)(nil) -) - -func (n *nodeExpandApplyableResource) expandsInstances() {} - -func (n *nodeExpandApplyableResource) References() []*addrs.Reference { - return (&NodeApplyableResource{NodeAbstractResource: n.NodeAbstractResource}).References() -} - -func (n *nodeExpandApplyableResource) Name() string { - return n.NodeAbstractResource.Name() + " (expand)" -} - -func (n *nodeExpandApplyableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var g Graph - - expander := ctx.InstanceExpander() - moduleInstances := expander.ExpandModule(n.Addr.Module) - var resources []addrs.AbsResource - for _, module := range moduleInstances { - resAddr := n.Addr.Resource.Absolute(module) - resources = append(resources, resAddr) - g.Add(&NodeApplyableResource{ - NodeAbstractResource: n.NodeAbstractResource, - Addr: n.Addr.Resource.Absolute(module), - }) - } - - return &g, nil -} - -// NodeApplyableResource represents a resource that is "applyable": -// it may need to have its record in the state adjusted to match configuration. -// -// Unlike in the plan walk, this resource node does not DynamicExpand. Instead, -// it should be inserted into the same graph as any instances of the nodes -// with dependency edges ensuring that the resource is evaluated before any -// of its instances, which will in turn ensure that the whole-resource record -// in the state is suitably prepared to receive any updates to instances. -type NodeApplyableResource struct { - *NodeAbstractResource - - Addr addrs.AbsResource -} - -var ( - _ GraphNodeModuleInstance = (*NodeApplyableResource)(nil) - _ GraphNodeConfigResource = (*NodeApplyableResource)(nil) - _ GraphNodeEvalable = (*NodeApplyableResource)(nil) - _ GraphNodeProviderConsumer = (*NodeApplyableResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeApplyableResource)(nil) - _ GraphNodeReferencer = (*NodeApplyableResource)(nil) -) - -func (n *NodeApplyableResource) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -func (n *NodeApplyableResource) References() []*addrs.Reference { - if n.Config == nil { - log.Printf("[WARN] NodeApplyableResource %q: no configuration, so can't determine References", dag.VertexName(n)) - return nil - } - - var result []*addrs.Reference - - // Since this node type only updates resource-level metadata, we only - // need to worry about the parts of the configuration that affect - // our "each mode": the count and for_each meta-arguments. - refs, _ := lang.ReferencesInExpr(n.Config.Count) - result = append(result, refs...) - refs, _ = lang.ReferencesInExpr(n.Config.ForEach) - result = append(result, refs...) - - return result -} - -// GraphNodeEvalable -func (n *NodeApplyableResource) EvalTree() EvalNode { - if n.Config == nil { - // Nothing to do, then.
- log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", n.Name()) - return &EvalNoop{} - } - - return &EvalWriteResourceState{ - Addr: n.Addr, - Config: n.Config, - ProviderAddr: n.ResolvedProvider, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go deleted file mode 100644 index 1282f8a1..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go +++ /dev/null @@ -1,444 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// NodeApplyableResourceInstance represents a resource instance that is -// "applyable": it is ready to be applied and is represented by a diff. -// -// This node is for a specific instance of a resource. It will usually be -// accompanied in the graph by a NodeApplyableResource representing its -// containing resource, and should depend on that node to ensure that the -// state is properly prepared to receive changes to instances. -type NodeApplyableResourceInstance struct { - *NodeAbstractResourceInstance - - destroyNode GraphNodeDestroyerCBD - graphNodeDeposer // implementation of GraphNodeDeposerConfig - - // If this node is forced to be CreateBeforeDestroy, we need to record that - // in the state to. - ForceCreateBeforeDestroy bool -} - -var ( - _ GraphNodeConfigResource = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeCreator = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeDeposer = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeEvalable = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeAttachDependencies = (*NodeApplyableResourceInstance)(nil) -) - -// GraphNodeAttachDestroyer -func (n *NodeApplyableResourceInstance) AttachDestroyNode(d GraphNodeDestroyerCBD) { - n.destroyNode = d -} - -// CreateBeforeDestroy checks this nodes config status and the status af any -// companion destroy node for CreateBeforeDestroy. -func (n *NodeApplyableResourceInstance) CreateBeforeDestroy() bool { - if n.ForceCreateBeforeDestroy { - return n.ForceCreateBeforeDestroy - } - - if n.Config != nil && n.Config.Managed != nil { - return n.Config.Managed.CreateBeforeDestroy - } - - if n.destroyNode != nil { - return n.destroyNode.CreateBeforeDestroy() - } - - return false -} - -func (n *NodeApplyableResourceInstance) ModifyCreateBeforeDestroy(v bool) error { - n.ForceCreateBeforeDestroy = v - return nil -} - -// GraphNodeCreator -func (n *NodeApplyableResourceInstance) CreateAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeReferencer, overriding NodeAbstractResourceInstance -func (n *NodeApplyableResourceInstance) References() []*addrs.Reference { - // Start with the usual resource instance implementation - ret := n.NodeAbstractResourceInstance.References() - - // Applying a resource must also depend on the destruction of any of its - // dependencies, since this may for example affect the outcome of - // evaluating an entire list of resources with "count" set (by reducing - // the count). 
- // - // However, we can't do this in create_before_destroy mode because that - // would create a dependency cycle. We make a compromise here of requiring - // changes to be updated across two applies in this case, since the first - // plan will use the old values. - if !n.CreateBeforeDestroy() { - for _, ref := range ret { - switch tr := ref.Subject.(type) { - case addrs.ResourceInstance: - newRef := *ref // shallow copy so we can mutate - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - newRef.Remaining = nil // can't access attributes of something being destroyed - ret = append(ret, &newRef) - case addrs.Resource: - newRef := *ref // shallow copy so we can mutate - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - newRef.Remaining = nil // can't access attributes of something being destroyed - ret = append(ret, &newRef) - } - } - } - - return ret -} - -// GraphNodeAttachDependencies -func (n *NodeApplyableResourceInstance) AttachDependencies(deps []addrs.ConfigResource) { - n.Dependencies = deps -} - -// GraphNodeEvalable -func (n *NodeApplyableResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - if n.Config == nil { - // This should not be possible, but we've got here in at least one - // case as discussed in the following issue: - // https://github.com/hashicorp/terraform/issues/21258 - // To avoid an outright crash here, we'll instead return an explicit - // error. - var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Resource node has no configuration attached", - fmt.Sprintf( - "The graph node for %s has no configuration attached to it. This suggests a bug in Terraform's apply graph builder; please report it!", - addr, - ), - )) - err := diags.Err() - return &EvalReturnError{ - Error: &err, - } - } - - // Eval info is different depending on what kind of resource this is - switch n.Config.Mode { - case addrs.ManagedResourceMode: - return n.evalTreeManagedResource(addr) - case addrs.DataResourceMode: - return n.evalTreeDataResource(addr) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } -} - -func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode { - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - - // Stop early if we don't actually have a diff - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if change == nil { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - // In this particular call to EvalReadData we include our planned - // change, which signals that we expect this read to complete fully - // with no unknown values; it'll produce an error if not. 
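// Aside: the References override above duplicates each resource reference
// into its "destroy phase" unless create_before_destroy is set, which would
// turn the extra ordering edge into a cycle. A simplified, runnable sketch
// with a toy reference type (sketchRef is not Terraform's addrs.Reference):
package main

import "fmt"

type sketchRef struct {
	subject string
	phase   string // "" means the ordinary create/update phase
}

func withDestroyPhases(refs []sketchRef, createBeforeDestroy bool) []sketchRef {
	if createBeforeDestroy {
		// Adding destroy-phase edges here would create a dependency cycle.
		return refs
	}
	for _, r := range refs { // r is a copy, safe to mutate
		r.phase = "destroy"
		refs = append(refs, r)
	}
	return refs
}

func main() {
	refs := []sketchRef{{subject: "aws_instance.a"}}
	fmt.Println(withDestroyPhases(refs, false))
}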
- &evalReadDataApply{ - evalReadData{ - Addr: addr.Resource, - Config: n.Config, - Planned: &change, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - State: &state, - }, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - - // Clear the diff now that we've applied it, so - // later nodes won't see a diff that's now a no-op. - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: nil, - }, - - &EvalUpdateStateHook{}, - }, - } -} - -func (n *NodeApplyableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode { - // Declare a bunch of variables that are used for state during - // evaluation. Most of these are written to by address below. - var provider providers.Interface - var providerSchema *ProviderSchema - var diff, diffApply *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - var err error - var createNew bool - var createBeforeDestroyEnabled bool - var deposedKey states.DeposedKey - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &diffApply, - }, - - // We don't want to do any destroys - // (these are handled by NodeDestroyResourceInstance instead) - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diffApply == nil { - return true, EvalEarlyExitError{} - } - if diffApply.Action == plans.Delete { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - destroy := false - if diffApply != nil { - destroy = (diffApply.Action == plans.Delete || diffApply.Action.IsReplace()) - } - if destroy && n.CreateBeforeDestroy() { - createBeforeDestroyEnabled = true - } - return createBeforeDestroyEnabled, nil - }, - Then: &EvalDeposeState{ - Addr: addr.Resource, - ForceKey: n.PreallocatedDeposedKey, - OutputKey: &deposedKey, - }, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - // Get the saved diff - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &diff, - }, - - // Make a new diff, in case we've learned new values in the state - // during apply which we can now incorporate.
- &EvalDiff{ - Addr: addr.Resource, - Config: n.Config, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - State: &state, - PreviousDiff: &diff, - OutputChange: &diffApply, - OutputState: &state, - }, - - // Compare the diffs - &EvalCheckPlannedChange{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Planned: &diff, - Actual: &diffApply, - }, - - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalReduceDiff{ - Addr: addr.Resource, - InChange: &diffApply, - Destroy: false, - OutChange: &diffApply, - }, - - // EvalReduceDiff may have simplified our planned change - // into a NoOp if it only requires destroying, since destroying - // is handled by NodeDestroyResourceInstance. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diffApply == nil || diffApply.Action == plans.NoOp { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - // Call pre-apply hook - &EvalApplyPre{ - Addr: addr.Resource, - State: &state, - Change: &diffApply, - }, - &EvalApply{ - Addr: addr.Resource, - Config: n.Config, - State: &state, - Change: &diffApply, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - Output: &state, - Error: &err, - CreateNew: &createNew, - CreateBeforeDestroy: n.CreateBeforeDestroy(), - }, - &EvalMaybeTainted{ - Addr: addr.Resource, - State: &state, - Change: &diffApply, - Error: &err, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - Dependencies: &n.Dependencies, - }, - &EvalApplyProvisioners{ - Addr: addr.Resource, - State: &state, // EvalApplyProvisioners will skip if already tainted - ResourceConfig: n.Config, - CreateNew: &createNew, - Error: &err, - When: configs.ProvisionerWhenCreate, - }, - &EvalMaybeTainted{ - Addr: addr.Resource, - State: &state, - Change: &diffApply, - Error: &err, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - Dependencies: &n.Dependencies, - }, - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return createBeforeDestroyEnabled && err != nil, nil - }, - Then: &EvalMaybeRestoreDeposedObject{ - Addr: addr.Resource, - PlannedChange: &diffApply, - Key: &deposedKey, - }, - }, - - // We clear the diff out here so that future nodes - // don't see a diff that is already complete. There - // is no longer a diff! 
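// Aside: the pairs of EvalApply/EvalApplyProvisioners and EvalMaybeTainted
// above follow a "persist even on failure" discipline: a failed create is
// recorded as tainted rather than dropped. A runnable sketch of that shape
// (toy types, not the real eval node API):
package main

import (
	"errors"
	"fmt"
)

type sketchObject struct{ status string } // "ready" or "tainted"

// applyStep runs one step, marks the object tainted on failure, and always
// persists the result so a later plan can propose replacement.
func applyStep(obj *sketchObject, step func() error, persist func(sketchObject)) error {
	err := step()
	if err != nil {
		obj.status = "tainted"
	}
	persist(*obj)
	return err
}

func main() {
	obj := &sketchObject{status: "ready"}
	persist := func(o sketchObject) { fmt.Println("state written:", o.status) }
	_ = applyStep(obj, func() error { return errors.New("provisioner failed") }, persist)
}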
- &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if !diff.Action.IsReplace() { - return true, nil - } - if !n.CreateBeforeDestroy() { - return true, nil - } - return false, nil - }, - Then: &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: nil, - }, - }, - - &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go deleted file mode 100644 index 6fc24fee..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go +++ /dev/null @@ -1,284 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/states" -) - -// NodeDestroyResourceInstance represents a resource instance that is to be -// destroyed. -type NodeDestroyResourceInstance struct { - *NodeAbstractResourceInstance - - // If DeposedKey is set to anything other than states.NotDeposed then - // this node destroys a deposed object of the associated instance - // rather than its current object. - DeposedKey states.DeposedKey - - CreateBeforeDestroyOverride *bool -} - -var ( - _ GraphNodeModuleInstance = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeConfigResource = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeDestroyer = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeDestroyerCBD = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeEvalable = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeProviderConsumer = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeProvisionerConsumer = (*NodeDestroyResourceInstance)(nil) -) - -func (n *NodeDestroyResourceInstance) Name() string { - if n.DeposedKey != states.NotDeposed { - return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey) - } - return n.ResourceInstanceAddr().String() + " (destroy)" -} - -// GraphNodeDestroyer -func (n *NodeDestroyResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyResourceInstance) CreateBeforeDestroy() bool { - if n.CreateBeforeDestroyOverride != nil { - return *n.CreateBeforeDestroyOverride - } - - // Config takes precedence - if n.Config != nil && n.Config.Managed != nil { - return n.Config.Managed.CreateBeforeDestroy - } - - // Otherwise check the state for a stored destroy order - if rs := n.ResourceState; rs != nil { - if s := rs.Instance(n.Addr.Resource.Key); s != nil { - if s.Current != nil { - return s.Current.CreateBeforeDestroy - } - } - } - - return false -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyResourceInstance) ModifyCreateBeforeDestroy(v bool) error { - n.CreateBeforeDestroyOverride = &v - return nil -} - -// GraphNodeReferenceable, overriding NodeAbstractResource -func (n *NodeDestroyResourceInstance) ReferenceableAddrs() []addrs.Referenceable { - normalAddrs := n.NodeAbstractResourceInstance.ReferenceableAddrs() - destroyAddrs := make([]addrs.Referenceable, len(normalAddrs)) - - phaseType := addrs.ResourceInstancePhaseDestroy 
- if n.CreateBeforeDestroy() { - phaseType = addrs.ResourceInstancePhaseDestroyCBD - } - - for i, normalAddr := range normalAddrs { - switch ta := normalAddr.(type) { - case addrs.Resource: - destroyAddrs[i] = ta.Phase(phaseType) - case addrs.ResourceInstance: - destroyAddrs[i] = ta.Phase(phaseType) - default: - destroyAddrs[i] = normalAddr - } - } - - return destroyAddrs -} - -// GraphNodeReferencer, overriding NodeAbstractResource -func (n *NodeDestroyResourceInstance) References() []*addrs.Reference { - // If we have a config, then we need to include destroy-time dependencies - if c := n.Config; c != nil && c.Managed != nil { - var result []*addrs.Reference - - // We include conn info and config for destroy time provisioners - // as dependencies that we have. - for _, p := range c.Managed.Provisioners { - schema := n.ProvisionerSchemas[p.Type] - - if p.When == configs.ProvisionerWhenDestroy { - if p.Connection != nil { - result = append(result, ReferencesFromConfig(p.Connection.Config, connectionBlockSupersetSchema)...) - } - result = append(result, ReferencesFromConfig(p.Config, schema)...) - } - } - - return result - } - - return nil -} - -// GraphNodeEvalable -func (n *NodeDestroyResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Get our state - rs := n.ResourceState - var is *states.ResourceInstance - if rs != nil { - is = rs.Instance(n.Addr.Resource.Key) - } - if is == nil { - log.Printf("[WARN] NodeDestroyResourceInstance for %s with no state", addr) - } - - var changeApply *plans.ResourceInstanceChange - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - var err error - return &EvalOpFilter{ - Ops: []walkOperation{walkApply, walkDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &changeApply, - }, - - &EvalReduceDiff{ - Addr: addr.Resource, - InChange: &changeApply, - Destroy: true, - OutChange: &changeApply, - }, - - // EvalReduceDiff may have simplified our planned change - // into a NoOp if it does not require destroying. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if changeApply == nil || changeApply.Action == plans.NoOp { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalReadState{ - Addr: addr.Resource, - Output: &state, - Provider: &provider, - ProviderSchema: &providerSchema, - }, - &EvalRequireState{ - State: &state, - }, - - // Call pre-apply hook - &EvalApplyPre{ - Addr: addr.Resource, - State: &state, - Change: &changeApply, - }, - - // Run destroy provisioners if not tainted - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if state != nil && state.Status == states.ObjectTainted { - return false, nil - } - - return true, nil - }, - - Then: &EvalApplyProvisioners{ - Addr: addr.Resource, - State: &state, - ResourceConfig: n.Config, - Error: &err, - When: configs.ProvisionerWhenDestroy, - }, - }, - - // If we have a provisioning error, then we just call - // the post-apply hook now. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return err != nil, nil - }, - - Then: &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - }, - - // Managed resources need to be destroyed, while data sources - // are only removed from state. 
- &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return addr.Resource.Resource.Mode == addrs.ManagedResourceMode, nil - }, - - Then: &EvalSequence{ - Nodes: []EvalNode{ - &EvalApply{ - Addr: addr.Resource, - Config: nil, // No configuration because we are destroying - State: &state, - Change: &changeApply, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - Output: &state, - Error: &err, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - }, - }, - Else: &evalWriteEmptyState{ - EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - }, - }, - }, - - &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go deleted file mode 100644 index 162655bf..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go +++ /dev/null @@ -1,314 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" -) - -// ConcreteResourceInstanceDeposedNodeFunc is a callback type used to convert -// an abstract resource instance to a concrete one of some type that has -// an associated deposed object key. -type ConcreteResourceInstanceDeposedNodeFunc func(*NodeAbstractResourceInstance, states.DeposedKey) dag.Vertex - -type GraphNodeDeposedResourceInstanceObject interface { - DeposedInstanceObjectKey() states.DeposedKey -} - -// NodePlanDeposedResourceInstanceObject represents deposed resource -// instance objects during plan. These are distinct from the primary object -// for each resource instance since the only valid operation to do with them -// is to destroy them. -// -// This node type is also used during the refresh walk to ensure that the -// record of a deposed object is up-to-date before we plan to destroy it. 
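// Aside: "deposing" moves the current object of an instance aside under a
// generated key so that a create-before-destroy replacement can become
// current before the old object is destroyed. A toy model of that state
// bookkeeping (the real DeposedKey is a short random identifier):
package main

import "fmt"

type sketchInstance struct {
	current string
	deposed map[string]string
}

func (i *sketchInstance) depose(key string) {
	if i.current == "" {
		return // nothing to depose
	}
	if i.deposed == nil {
		i.deposed = map[string]string{}
	}
	i.deposed[key] = i.current
	i.current = ""
}

func main() {
	inst := &sketchInstance{current: "i-abc123"}
	inst.depose("00000001")
	inst.current = "i-def456" // the replacement object becomes current
	fmt.Println(inst.current, inst.deposed)
}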
-type NodePlanDeposedResourceInstanceObject struct { - *NodeAbstractResourceInstance - DeposedKey states.DeposedKey -} - -var ( - _ GraphNodeDeposedResourceInstanceObject = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeConfigResource = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeResourceInstance = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeReferenceable = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeReferencer = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeEvalable = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeProviderConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeProvisionerConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) -) - -func (n *NodePlanDeposedResourceInstanceObject) Name() string { - return fmt.Sprintf("%s (deposed %s)", n.ResourceInstanceAddr().String(), n.DeposedKey) -} - -func (n *NodePlanDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { - return n.DeposedKey -} - -// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodePlanDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { - // Deposed objects don't participate in references. - return nil -} - -// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference { - // We don't evaluate configuration for deposed objects, so they effectively - // make no references. - return nil -} - -// GraphNodeEvalable impl. -func (n *NodePlanDeposedResourceInstanceObject) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - - seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} - - // During the refresh walk we will ensure that our record of the deposed - // object is up-to-date. If it was already deleted outside of Terraform - // then this will remove it from state and thus avoid us planning a - // destroy for it during the subsequent plan walk. - seq.Nodes = append(seq.Nodes, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadStateDeposed{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - Key: n.DeposedKey, - Output: &state, - }, - &EvalRefresh{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - Provider: &provider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - State: &state, - Output: &state, - }, - &EvalWriteStateDeposed{ - Addr: addr.Resource, - Key: n.DeposedKey, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - }, - }, - }) - - // During the plan walk we always produce a planned destroy change, because - // destroying is the only supported action for deposed objects. 
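// Aside: the sequence built in this EvalTree wraps its step lists in
// EvalOpFilter so they only run during particular walks, e.g. refresh vs.
// plan. A runnable sketch of that gating with toy types:
package main

import "fmt"

type sketchWalk string

type opFilter struct {
	ops  []sketchWalk
	node func()
}

// eval runs the wrapped node only when the current walk matches one of ops.
func (f opFilter) eval(current sketchWalk) {
	for _, op := range f.ops {
		if op == current {
			f.node()
			return
		}
	}
}

func main() {
	refreshOnly := opFilter{ops: []sketchWalk{"refresh"}, node: func() { fmt.Println("refresh deposed object") }}
	planOnly := opFilter{ops: []sketchWalk{"plan", "planDestroy"}, node: func() { fmt.Println("plan destroy change") }}
	for _, walk := range []sketchWalk{"refresh", "plan"} {
		refreshOnly.eval(walk)
		planOnly.eval(walk)
	}
}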
- var change *plans.ResourceInstanceChange - seq.Nodes = append(seq.Nodes, &EvalOpFilter{ - Ops: []walkOperation{walkPlan, walkPlanDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadStateDeposed{ - Addr: addr.Resource, - Output: &state, - Key: n.DeposedKey, - Provider: &provider, - ProviderSchema: &providerSchema, - }, - &EvalDiffDestroy{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - DeposedKey: n.DeposedKey, - State: &state, - Output: &change, - }, - &EvalWriteDiff{ - Addr: addr.Resource, - DeposedKey: n.DeposedKey, - ProviderSchema: &providerSchema, - Change: &change, - }, - // Since deposed objects cannot be referenced by expressions - // elsewhere, we don't need to also record the planned new - // state in this case. - }, - }, - }) - - return seq -} - -// NodeDestroyDeposedResourceInstanceObject represents deposed resource -// instance objects during apply. Nodes of this type are inserted by -// DiffTransformer when the planned changeset contains "delete" changes for -// deposed instance objects, and its only supported operation is to destroy -// and then forget the associated object. -type NodeDestroyDeposedResourceInstanceObject struct { - *NodeAbstractResourceInstance - DeposedKey states.DeposedKey -} - -var ( - _ GraphNodeDeposedResourceInstanceObject = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeConfigResource = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeResourceInstance = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeDestroyer = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeDestroyerCBD = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeReferenceable = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeReferencer = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeEvalable = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeProviderConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeProvisionerConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) -) - -func (n *NodeDestroyDeposedResourceInstanceObject) Name() string { - return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey) -} - -func (n *NodeDestroyDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { - return n.DeposedKey -} - -// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodeDestroyDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { - // Deposed objects don't participate in references. - return nil -} - -// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodeDestroyDeposedResourceInstanceObject) References() []*addrs.Reference { - // We don't evaluate configuration for deposed objects, so they effectively - // make no references. - return nil -} - -// GraphNodeDestroyer -func (n *NodeDestroyDeposedResourceInstanceObject) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyDeposedResourceInstanceObject) CreateBeforeDestroy() bool { - // A deposed instance is always CreateBeforeDestroy by definition, since - // we use deposed only to handle create-before-destroy. 
- return true -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v bool) error { - if !v { - // Should never happen: deposed instances are _always_ create_before_destroy. - return fmt.Errorf("can't deactivate create_before_destroy for a deposed instance") - } - return nil -} - -// GraphNodeEvalable impl. -func (n *NodeDestroyDeposedResourceInstanceObject) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - var change *plans.ResourceInstanceChange - var err error - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadStateDeposed{ - Addr: addr.Resource, - Output: &state, - Key: n.DeposedKey, - Provider: &provider, - ProviderSchema: &providerSchema, - }, - &EvalDiffDestroy{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - Output: &change, - }, - // Call pre-apply hook - &EvalApplyPre{ - Addr: addr.Resource, - State: &state, - Change: &change, - }, - &EvalApply{ - Addr: addr.Resource, - Config: nil, // No configuration because we are destroying - State: &state, - Change: &change, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Output: &state, - Error: &err, - }, - // Always write the resource back to the state deposed... if it - // was successfully destroyed it will be pruned. If it was not, it will - // be caught on the next run. - &EvalWriteStateDeposed{ - Addr: addr.Resource, - Key: n.DeposedKey, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - &EvalReturnError{ - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - } -} - -// GraphNodeDeposer is an optional interface implemented by graph nodes that -// might create a single new deposed object for a specific associated resource -// instance, allowing a caller to optionally pre-allocate a DeposedKey for -// it. -type GraphNodeDeposer interface { - // SetPreallocatedDeposedKey will be called during graph construction - // if a particular node must use a pre-allocated deposed key if/when it - // "deposes" the current object of its associated resource instance. - SetPreallocatedDeposedKey(key states.DeposedKey) -} - -// graphNodeDeposer is an embeddable implementation of GraphNodeDeposer. -// Embed it in a node type to get automatic support for it, and then access -// the field PreallocatedDeposedKey to access any pre-allocated key. -type graphNodeDeposer struct { - PreallocatedDeposedKey states.DeposedKey -} - -func (n *graphNodeDeposer) SetPreallocatedDeposedKey(key states.DeposedKey) { - n.PreallocatedDeposedKey = key -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go deleted file mode 100644 index af4de247..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go +++ /dev/null @@ -1,286 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// nodeExpandPlannableResource handles the first layer of resource -// expansion. 
We need this extra layer so DynamicExpand is called twice for -// the resource, the first to expand the Resource for each module instance, and -// the second to expand each ResourceInstance for the expanded Resources. -type nodeExpandPlannableResource struct { - *NodeAbstractResource - - // ForceCreateBeforeDestroy might be set via our GraphNodeDestroyerCBD - // during graph construction, if dependencies require us to force this - // on regardless of what the configuration says. - ForceCreateBeforeDestroy *bool -} - -var ( - _ GraphNodeDestroyerCBD = (*nodeExpandPlannableResource)(nil) - _ GraphNodeDynamicExpandable = (*nodeExpandPlannableResource)(nil) - _ GraphNodeReferenceable = (*nodeExpandPlannableResource)(nil) - _ GraphNodeReferencer = (*nodeExpandPlannableResource)(nil) - _ GraphNodeConfigResource = (*nodeExpandPlannableResource)(nil) - _ GraphNodeAttachResourceConfig = (*nodeExpandPlannableResource)(nil) -) - -func (n *nodeExpandPlannableResource) Name() string { - return n.NodeAbstractResource.Name() + " (expand)" -} - -// GraphNodeDestroyerCBD -func (n *nodeExpandPlannableResource) CreateBeforeDestroy() bool { - if n.ForceCreateBeforeDestroy != nil { - return *n.ForceCreateBeforeDestroy - } - - // If we have no config, we just assume no - if n.Config == nil || n.Config.Managed == nil { - return false - } - - return n.Config.Managed.CreateBeforeDestroy -} - -// GraphNodeDestroyerCBD -func (n *nodeExpandPlannableResource) ModifyCreateBeforeDestroy(v bool) error { - n.ForceCreateBeforeDestroy = &v - return nil -} - -func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var g Graph - - expander := ctx.InstanceExpander() - moduleInstances := expander.ExpandModule(n.Addr.Module) - - // Add the current expanded resource to the graph - for _, module := range moduleInstances { - resAddr := n.Addr.Resource.Absolute(module) - g.Add(&NodePlannableResource{ - NodeAbstractResource: n.NodeAbstractResource, - Addr: resAddr, - ForceCreateBeforeDestroy: n.ForceCreateBeforeDestroy, - }) - } - - // Lock the state while we inspect it - state := ctx.State().Lock() - defer ctx.State().Unlock() - - var orphans []*states.Resource - for _, res := range state.Resources(n.Addr) { - found := false - for _, m := range moduleInstances { - if m.Equal(res.Addr.Module) { - found = true - break - } - } - // Address from state was not found in the current config - if !found { - orphans = append(orphans, res) - } - } - - // The concrete resource factory we'll use for orphans - concreteResourceOrphan := func(a *NodeAbstractResourceInstance) *NodePlannableResourceInstanceOrphan { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Schema = n.Schema - a.ProvisionerSchemas = n.ProvisionerSchemas - a.ProviderMetas = n.ProviderMetas - - return &NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: a, - } - } - - for _, res := range orphans { - for key := range res.Instances { - addr := res.Addr.Instance(key) - abs := NewNodeAbstractResourceInstance(addr) - abs.AttachResourceState(res) - g.Add(concreteResourceOrphan(abs)) - } - } - - return &g, nil -} - -// NodePlannableResource represents a resource that is "plannable": -// it is ready to be planned in order to create a diff.
-type NodePlannableResource struct { - *NodeAbstractResource - - Addr addrs.AbsResource - - // ForceCreateBeforeDestroy might be set via our GraphNodeDestroyerCBD - // during graph construction, if dependencies require us to force this - // on regardless of what the configuration says. - ForceCreateBeforeDestroy *bool -} - -var ( - _ GraphNodeModuleInstance = (*NodePlannableResource)(nil) - _ GraphNodeDestroyerCBD = (*NodePlannableResource)(nil) - _ GraphNodeDynamicExpandable = (*NodePlannableResource)(nil) - _ GraphNodeReferenceable = (*NodePlannableResource)(nil) - _ GraphNodeReferencer = (*NodePlannableResource)(nil) - _ GraphNodeConfigResource = (*NodePlannableResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlannableResource)(nil) -) - -func (n *NodePlannableResource) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -func (n *NodePlannableResource) Name() string { - return n.Addr.String() -} - -// GraphNodeModuleInstance -func (n *NodePlannableResource) ModuleInstance() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeEvalable -func (n *NodePlannableResource) EvalTree() EvalNode { - if n.Config == nil { - // Nothing to do, then. - log.Printf("[TRACE] NodePlannableResource: no configuration present for %s", n.Name()) - return &EvalNoop{} - } - - // this ensures we can reference the resource even if the count is 0 - return &EvalWriteResourceState{ - Addr: n.Addr, - Config: n.Config, - ProviderAddr: n.ResolvedProvider, - } -} - -// GraphNodeDestroyerCBD -func (n *NodePlannableResource) CreateBeforeDestroy() bool { - if n.ForceCreateBeforeDestroy != nil { - return *n.ForceCreateBeforeDestroy - } - - // If we have no config, we just assume no - if n.Config == nil || n.Config.Managed == nil { - return false - } - - return n.Config.Managed.CreateBeforeDestroy -} - -// GraphNodeDestroyerCBD -func (n *NodePlannableResource) ModifyCreateBeforeDestroy(v bool) error { - n.ForceCreateBeforeDestroy = &v - return nil -} - -// GraphNodeDynamicExpandable -func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - // We need to potentially rename an instance address in the state - // if we're transitioning whether "count" is set at all. - fixResourceCountSetTransition(ctx, n.Addr.Config(), n.Config.Count != nil) - - // Our instance expander should already have been informed about the - // expansion of this resource and of all of its containing modules, so - // it can tell us which instance addresses we need to process. - expander := ctx.InstanceExpander() - instanceAddrs := expander.ExpandResource(n.ResourceAddr().Absolute(ctx.Path())) - - // Our graph transformers require access to the full state, so we'll - // temporarily lock it while we work on this. - state := ctx.State().Lock() - defer ctx.State().Unlock() - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Schema = n.Schema - a.ProvisionerSchemas = n.ProvisionerSchemas - a.ProviderMetas = n.ProviderMetas - a.dependsOn = n.dependsOn - - return &NodePlannableResourceInstance{ - NodeAbstractResourceInstance: a, - - // By the time we're walking, we've figured out whether we need - // to force on CreateBeforeDestroy due to dependencies on other - // nodes that have it.
- ForceCreateBeforeDestroy: n.CreateBeforeDestroy(), - } - } - - // The concrete resource factory we'll use for orphans - concreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Schema = n.Schema - a.ProvisionerSchemas = n.ProvisionerSchemas - a.ProviderMetas = n.ProviderMetas - - return &NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count or for_each (if present) - &ResourceCountTransformer{ - Concrete: concreteResource, - Schema: n.Schema, - Addr: n.ResourceAddr(), - InstanceAddrs: instanceAddrs, - }, - - // Add the count/for_each orphans - &OrphanResourceInstanceCountTransformer{ - Concrete: concreteResourceOrphan, - Addr: n.Addr, - InstanceAddrs: instanceAddrs, - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{Targets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodePlannableResource", - } - graph, diags := b.Build(ctx.Path()) - return graph, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go deleted file mode 100644 index d53c1441..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go +++ /dev/null @@ -1,88 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" -) - -// NodePlanDestroyableResourceInstance represents a resource that is ready -// to be planned for destruction. -type NodePlanDestroyableResourceInstance struct { - *NodeAbstractResourceInstance -} - -var ( - _ GraphNodeModuleInstance = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeReferencer = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeDestroyer = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeConfigResource = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeEvalable = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeProviderConsumer = (*NodePlanDestroyableResourceInstance)(nil) -) - -// GraphNodeDestroyer -func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeEvalable -func (n *NodePlanDestroyableResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables that are used for state during - // evaluation. These are written to by address in the EvalNodes we - // declare below. 
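// Aside: the "written to by address" comments in these files describe the
// pointer plumbing used throughout the eval trees: variables are declared up
// front, one node writes through a pointer, and later nodes read the same
// location. A runnable sketch of that pattern with toy steps:
package main

import "fmt"

type writeStep struct{ out *string }

func (s writeStep) eval() { *s.out = "value produced by an earlier node" }

type readStep struct{ in *string }

func (s readStep) eval() { fmt.Println("later node sees:", *s.in) }

func main() {
	var shared string
	steps := []interface{ eval() }{
		writeStep{out: &shared},
		readStep{in: &shared},
	}
	for _, s := range steps {
		s.eval()
	}
}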
- var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - if n.ResolvedProvider.Provider.Type == "" { - // Should never happen; indicates that the graph was not constructed - // correctly since we didn't get our provider attached. - panic(fmt.Sprintf("%T %q was not assigned a resolved provider", n, dag.VertexName(n))) - } - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - &EvalDiffDestroy{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - Output: &change, - }, - &EvalCheckPreventDestroy{ - Addr: addr.Resource, - Config: n.Config, - Change: &change, - }, - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go deleted file mode 100644 index 19f2b633..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go +++ /dev/null @@ -1,164 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - - "github.com/hashicorp/terraform/addrs" -) - -// NodePlannableResourceInstance represents a _single_ resource -// instance that is plannable. This means this represents a single -// count index, for example. -type NodePlannableResourceInstance struct { - *NodeAbstractResourceInstance - ForceCreateBeforeDestroy bool -} - -var ( - _ GraphNodeModuleInstance = (*NodePlannableResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodePlannableResourceInstance)(nil) - _ GraphNodeReferencer = (*NodePlannableResourceInstance)(nil) - _ GraphNodeConfigResource = (*NodePlannableResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodePlannableResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodePlannableResourceInstance)(nil) - _ GraphNodeEvalable = (*NodePlannableResourceInstance)(nil) -) - -// GraphNodeEvalable -func (n *NodePlannableResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Eval info is different depending on what kind of resource this is - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - return n.evalTreeManagedResource(addr) - case addrs.DataResourceMode: - return n.evalTreeDataResource(addr) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } -} - -func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode { - config := n.Config - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalValidateSelfRef{ - Addr: addr.Resource, - Config: config.Config, - ProviderSchema: &providerSchema, - }, 
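// Aside: several EvalIf checks in these trees return EvalEarlyExitError to
// abandon the rest of a sequence without reporting a real failure (for
// example when there is no change to apply). A runnable sketch of that
// sentinel-error pattern:
package main

import (
	"errors"
	"fmt"
)

var errEarlyExit = errors.New("early exit")

func runSequence(steps []func() error) error {
	for _, step := range steps {
		if err := step(); err != nil {
			if errors.Is(err, errEarlyExit) {
				return nil // not a failure, just nothing more to do
			}
			return err
		}
	}
	return nil
}

func main() {
	err := runSequence([]func() error{
		func() error { fmt.Println("read saved diff"); return nil },
		func() error { return errEarlyExit }, // e.g. diff was nil
		func() error { fmt.Println("never reached"); return nil },
	})
	fmt.Println("sequence error:", err)
}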
- - &evalReadDataPlan{ - evalReadData: evalReadData{ - Addr: addr.Resource, - Config: n.Config, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - OutputChange: &change, - State: &state, - }, - dependsOn: n.dependsOn, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - }, - } -} - -func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode { - config := n.Config - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - Output: &state, - }, - - &EvalValidateSelfRef{ - Addr: addr.Resource, - Config: config.Config, - ProviderSchema: &providerSchema, - }, - - &EvalDiff{ - Addr: addr.Resource, - Config: n.Config, - CreateBeforeDestroy: n.ForceCreateBeforeDestroy, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - State: &state, - OutputChange: &change, - OutputState: &state, - }, - &EvalCheckPreventDestroy{ - Addr: addr.Resource, - Config: n.Config, - Change: &change, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - ProviderSchema: &providerSchema, - }, - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go deleted file mode 100644 index 20ad53c3..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go +++ /dev/null @@ -1,84 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" -) - -// NodePlannableResourceInstanceOrphan represents a resource instance that -// exists in the state but no longer has any corresponding configuration, so -// the only change that can be planned for it is destruction.
-type NodePlannableResourceInstanceOrphan struct { - *NodeAbstractResourceInstance -} - -var ( - _ GraphNodeModuleInstance = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeReferenceable = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeReferencer = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeConfigResource = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeResourceInstance = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeAttachResourceState = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil) -) - -func (n *NodePlannableResourceInstanceOrphan) Name() string { - return n.ResourceInstanceAddr().String() + " (orphan)" -} - -// GraphNodeEvalable -func (n *NodePlannableResourceInstanceOrphan) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables that are used for state during - // evaluation. Most of these are written to by address below. - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - var provider providers.Interface - var providerSchema *ProviderSchema - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - &EvalDiffDestroy{ - Addr: addr.Resource, - State: &state, - ProviderAddr: n.ResolvedProvider, - Output: &change, - OutputState: &state, // Will point to a nil state after this completes, signalling destroyed - }, - &EvalCheckPreventDestroy{ - Addr: addr.Resource, - Config: n.Config, - Change: &change, - }, - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go deleted file mode 100644 index 8151ad3a..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go +++ /dev/null @@ -1,379 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - - "github.com/hashicorp/terraform/states" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/tfdiags" -) - -// nodeExpandRefreshableManagedResource handles the first layer of resource -// expansion during refresh. We need this extra layer so DynamicExpand is called -// twice for the resource, the first to expand the Resource for each module -// instance, and the second to expand each ResourceInstance for the expanded -// Resources. -type nodeExpandRefreshableManagedResource struct { - *NodeAbstractResource - - // We attach dependencies to the Resource during refresh, since the - // instances are instantiated during DynamicExpand.
- Dependencies []addrs.ConfigResource -} - -var ( - _ GraphNodeDynamicExpandable = (*nodeExpandRefreshableManagedResource)(nil) - _ GraphNodeReferenceable = (*nodeExpandRefreshableManagedResource)(nil) - _ GraphNodeReferencer = (*nodeExpandRefreshableManagedResource)(nil) - _ GraphNodeConfigResource = (*nodeExpandRefreshableManagedResource)(nil) - _ GraphNodeAttachResourceConfig = (*nodeExpandRefreshableManagedResource)(nil) - _ GraphNodeAttachDependencies = (*nodeExpandRefreshableManagedResource)(nil) -) - -func (n *nodeExpandRefreshableManagedResource) Name() string { - return n.NodeAbstractResource.Name() + " (expand)" -} - -// GraphNodeAttachDependencies -func (n *nodeExpandRefreshableManagedResource) AttachDependencies(deps []addrs.ConfigResource) { - n.Dependencies = deps -} - -func (n *nodeExpandRefreshableManagedResource) References() []*addrs.Reference { - return (&NodeRefreshableManagedResource{NodeAbstractResource: n.NodeAbstractResource}).References() -} - -func (n *nodeExpandRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var g Graph - - expander := ctx.InstanceExpander() - for _, module := range expander.ExpandModule(n.Addr.Module) { - g.Add(&NodeRefreshableManagedResource{ - NodeAbstractResource: n.NodeAbstractResource, - Addr: n.Addr.Resource.Absolute(module), - Dependencies: n.Dependencies, - }) - } - - return &g, nil -} - -// NodeRefreshableManagedResource represents a resource that is expandable into -// NodeRefreshableManagedResourceInstance. Resource count orphans are also added. -type NodeRefreshableManagedResource struct { - *NodeAbstractResource - - Addr addrs.AbsResource - - // We attach dependencies to the Resource during refresh, since the - // instances are instantiated during DynamicExpand. - Dependencies []addrs.ConfigResource -} - -var ( - _ GraphNodeModuleInstance = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeDynamicExpandable = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeReferenceable = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeReferencer = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeConfigResource = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResource)(nil) -) - -func (n *NodeRefreshableManagedResource) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeDynamicExpandable -func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - expander := ctx.InstanceExpander() - // Inform our instance expander about our expansion results, and then use - // it to calculate the instance addresses we'll expand for. - switch { - case n.Config.Count != nil: - count, countDiags := evaluateCountExpression(n.Config.Count, ctx) - diags = diags.Append(countDiags) - if countDiags.HasErrors() { - return nil, diags.Err() - } - - expander.SetResourceCount(n.Addr.Module, n.Addr.Resource, count) - - case n.Config.ForEach != nil: - forEachMap, forEachDiags := evaluateForEachExpression(n.Config.ForEach, ctx) - diags = diags.Append(forEachDiags) - if forEachDiags.HasErrors() { - return nil, diags.Err() - } - - expander.SetResourceForEach(n.Addr.Module, n.Addr.Resource, forEachMap) - - default: - expander.SetResourceSingle(n.Addr.Module, n.Addr.Resource) - } - - // Next we need to potentially rename an instance address in the state - // if we're transitioning whether "count" is set at all.
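// Aside: the count/for_each switch above registers expansion results, and
// the comment about renaming refers to the transition between an unkeyed
// instance (aws_instance.a) and an int-keyed one (aws_instance.a[0]) when
// "count" is added or removed. A sketch of how the three cases produce
// instance keys (toy representation, not Terraform's addrs package):
package main

import "fmt"

func expandKeys(count *int, forEach map[string]bool) []string {
	switch {
	case count != nil:
		keys := make([]string, 0, *count)
		for i := 0; i < *count; i++ {
			keys = append(keys, fmt.Sprintf("[%d]", i))
		}
		return keys
	case forEach != nil:
		var keys []string
		for k := range forEach { // order is not significant here
			keys = append(keys, fmt.Sprintf("[%q]", k))
		}
		return keys
	default:
		return []string{""} // single instance, no index
	}
}

func main() {
	two := 2
	fmt.Println(expandKeys(&two, nil))                       // [[0] [1]]
	fmt.Println(expandKeys(nil, map[string]bool{"a": true})) // [["a"]]
}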
- fixResourceCountSetTransition(ctx, n.Addr.Config(), n.Config.Count != nil) - instanceAddrs := expander.ExpandResource(n.Addr) - - // Our graph transformers require access to the full state, so we'll - // temporarily lock it while we work on this. - state := ctx.State().Lock() - defer ctx.State().Unlock() - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Dependencies = n.Dependencies - a.ProviderMetas = n.ProviderMetas - - return &NodeRefreshableManagedResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count. - &ResourceCountTransformer{ - Concrete: concreteResource, - Schema: n.Schema, - Addr: n.Addr.Config(), - InstanceAddrs: instanceAddrs, - }, - - // Add the count orphans to make sure these resources are accounted for - // during a scale in. - &OrphanResourceInstanceCountTransformer{ - Concrete: concreteResource, - Addr: n.Addr, - InstanceAddrs: instanceAddrs, - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{Targets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodeRefreshableManagedResource", - } - - graph, diags := b.Build(nil) - return graph, diags.ErrWithWarnings() -} - -// NodeRefreshableManagedResourceInstance represents a single managed -// resource instance whose record in the state is to be refreshed to match -// the remote object. -type NodeRefreshableManagedResourceInstance struct { - *NodeAbstractResourceInstance -} - -var ( - _ GraphNodeModuleInstance = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeDestroyer = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeConfigResource = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeEvalable = (*NodeRefreshableManagedResourceInstance)(nil) -) - -// GraphNodeDestroyer -func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeEvalable -func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Eval info is different depending on what kind of resource this is - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - if n.ResourceState == nil { - log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s has no existing state to refresh", addr) - return n.evalTreeManagedResourceNoState() - } - log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s will be refreshed", addr) - return n.evalTreeManagedResource() - - case addrs.DataResourceMode: - // Get the data source node. If we don't have a configuration - // then it is an orphan so we destroy it (remove it from the state).
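// Aside: the dispatch just below picks a refresh node when configuration is
// still present and a destroy node when it is not, since a data resource
// without config is an orphan that only needs removing from state. A
// runnable sketch of that choice with toy node types:
package main

import "fmt"

type evalable interface{ eval() }

type refreshDataNode struct{}

func (refreshDataNode) eval() { fmt.Println("re-read the data source") }

type destroyDataNode struct{}

func (destroyDataNode) eval() { fmt.Println("remove orphaned data source from state") }

func pickDataNode(hasConfig bool) evalable {
	if hasConfig {
		return refreshDataNode{}
	}
	return destroyDataNode{}
}

func main() {
	pickDataNode(true).eval()
	pickDataNode(false).eval()
}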
- var dn GraphNodeEvalable - if n.Config != nil { - dn = &NodeRefreshableDataResourceInstance{ - NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, - } - } else { - dn = &NodeDestroyableDataResourceInstance{ - NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, - } - } - - return dn.EvalTree() - default: - panic(fmt.Errorf("unsupported resource mode %s", addr.Resource.Resource.Mode)) - } -} - -func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables that are used for state during - // evaluation. Most of these are written to by address below. - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - - // This happened during initial development. All known cases were - // fixed and tested, but as a sanity check let's assert here. - if n.ResourceState == nil { - err := fmt.Errorf( - "No resource state attached for addr: %s\n\n"+ - "This is a bug. Please report this to Terraform with your configuration\n"+ - "and state attached. Please be careful to scrub any sensitive information.", - addr) - return &EvalReturnError{Error: &err} - } - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalRefreshDependencies{ - State: &state, - Dependencies: &n.Dependencies, - }, - - &EvalRefresh{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - Provider: &provider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - State: &state, - Output: &state, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - Dependencies: &n.Dependencies, - }, - }, - } -} - -// evalTreeManagedResourceNoState produces an EvalSequence for refresh resource -// nodes that don't have state attached. An example of where this functionality -// is useful is when a resource that already exists in state is being scaled -// out, i.e. has its resource count increased. In this case, the scaled-out node -// needs to be available to other nodes (namely data sources) that may depend -// on it for proper interpolation, or confusing "index out of range" errors can -// occur. -// -// The steps in this sequence are very similar to the steps carried out in -// plan, but nothing is done with the diff after it is created - it is dropped, -// and its changes are not counted in the UI. -func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables that are used for state during - // evaluation. Most of these are written to by address below.
- var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalDiff{ - Addr: addr.Resource, - Config: n.Config, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - OutputChange: &change, - OutputState: &state, - Stub: true, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - Dependencies: &n.Dependencies, - }, - - // We must also save the planned change, so that expressions in - // other nodes, such as provider configurations and data resources, - // can work with the planned new value. - // - // This depends on the fact that Context.Refresh creates a - // temporary new empty changeset for the duration of its graph - // walk, and so this recorded change will be discarded immediately - // after the refresh walk completes. - &EvalWriteDiff{ - Addr: addr.Resource, - Change: &change, - ProviderSchema: &providerSchema, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go deleted file mode 100644 index 0228e3d1..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go +++ /dev/null @@ -1,99 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/zclconf/go-cty/cty" -) - -// NodeValidatableResource represents a resource that is used for validation -// only. -type NodeValidatableResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeModuleInstance = (*NodeValidatableResource)(nil) - _ GraphNodeEvalable = (*NodeValidatableResource)(nil) - _ GraphNodeReferenceable = (*NodeValidatableResource)(nil) - _ GraphNodeReferencer = (*NodeValidatableResource)(nil) - _ GraphNodeConfigResource = (*NodeValidatableResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeValidatableResource)(nil) - _ GraphNodeAttachProviderMetaConfigs = (*NodeValidatableResource)(nil) -) - -func (n *NodeValidatableResource) Path() addrs.ModuleInstance { - // There is no expansion during validation, so we evaluate everything as - // single module instances. - return n.Addr.Module.UnkeyedInstanceShim() -} - -// GraphNodeEvalable -func (n *NodeValidatableResource) EvalTree() EvalNode { - addr := n.ResourceAddr() - config := n.Config - - // Declare the variables will be used are used to pass values along - // the evaluation sequence below. These are written to via pointers - // passed to the EvalNodes. 
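-  // (For example, EvalGetProvider below writes the resolved provider to
-  // &provider, which EvalValidateResource then reads via the same pointer.)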
- var provider providers.Interface - var providerSchema *ProviderSchema - var configVal cty.Value - - seq := &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalValidateResource{ - Addr: addr.Resource, - Provider: &provider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - Config: config, - ConfigVal: &configVal, - }, - }, - } - - if managed := n.Config.Managed; managed != nil { - hasCount := n.Config.Count != nil - hasForEach := n.Config.ForEach != nil - - // Validate all the provisioners - for _, p := range managed.Provisioners { - var provisioner provisioners.Interface - var provisionerSchema *configschema.Block - - if p.Connection == nil { - p.Connection = config.Managed.Connection - } else if config.Managed.Connection != nil { - p.Connection.Config = configs.MergeBodies(config.Managed.Connection.Config, p.Connection.Config) - } - - seq.Nodes = append( - seq.Nodes, - &EvalGetProvisioner{ - Name: p.Type, - Output: &provisioner, - Schema: &provisionerSchema, - }, - &EvalValidateProvisioner{ - ResourceAddr: addr.Resource, - Provisioner: &provisioner, - Schema: &provisionerSchema, - Config: p, - ResourceHasCount: hasCount, - ResourceHasForEach: hasForEach, - }, - ) - } - } - - return seq -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go deleted file mode 100644 index f9814f37..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go +++ /dev/null @@ -1,65 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" -) - -// NodeRootVariable represents a root variable input. -type NodeRootVariable struct { - Addr addrs.InputVariable - Config *configs.Variable -} - -var ( - _ GraphNodeModuleInstance = (*NodeRootVariable)(nil) - _ GraphNodeReferenceable = (*NodeRootVariable)(nil) -) - -func (n *NodeRootVariable) Name() string { - return n.Addr.String() -} - -// GraphNodeModuleInstance -func (n *NodeRootVariable) Path() addrs.ModuleInstance { - return addrs.RootModuleInstance -} - -func (n *NodeRootVariable) ModulePath() addrs.Module { - return addrs.RootModule -} - -// GraphNodeReferenceable -func (n *NodeRootVariable) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr} -} - -// GraphNodeEvalable -func (n *NodeRootVariable) EvalTree() EvalNode { - // We don't actually need to _evaluate_ a root module variable, because - // its value is always constant and already stashed away in our EvalContext. - // However, we might need to run some user-defined validation rules against - // the value. - - if n.Config == nil || len(n.Config.Validations) == 0 { - return &EvalSequence{} // nothing to do - } - - return &evalVariableValidations{ - Addr: addrs.RootModuleInstance.InputVariable(n.Addr.Name), - Config: n.Config, - Expr: nil, // not set for root module variables - } -} - -// dag.GraphNodeDotter impl. 
-func (n *NodeRootVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_value.go b/vendor/github.com/hashicorp/terraform/terraform/node_value.go deleted file mode 100644 index 62a6e6ae..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_value.go +++ /dev/null @@ -1,10 +0,0 @@ -package terraform - -// graphNodeTemporaryValue is implemented by nodes that may represent temporary -// values, which are those not saved to the state file. This includes locals, -// variables, and non-root outputs. -// A boolean return value allows a node which may need to be saved to -// conditionally do so. -type graphNodeTemporaryValue interface { - temporaryValue() bool -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go deleted file mode 100644 index af04c6cd..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/plan.go +++ /dev/null @@ -1,122 +0,0 @@ -package terraform - -import ( - "bytes" - "encoding/gob" - "fmt" - "io" - "sync" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/configs" -) - -func init() { - gob.Register(make([]interface{}, 0)) - gob.Register(make([]map[string]interface{}, 0)) - gob.Register(make(map[string]interface{})) - gob.Register(make(map[string]string)) -} - -// Plan represents a single Terraform execution plan, which contains -// all the information necessary to make an infrastructure change. -// -// A plan has to contain basically the entire state of the world -// necessary to make a change: the state, diff, config, backend config, etc. -// This is so that it can run alone without any other data. -type Plan struct { - // Diff describes the resource actions that must be taken when this - // plan is applied. - Diff *Diff - - // Config represents the entire configuration that was present when this - // plan was created. - Config *configs.Config - - // State is the Terraform state that was current when this plan was - // created. - // - // It is not allowed to apply a plan that has a stale state, since its - // diff could be outdated. - State *State - - // Vars retains the variables that were set when creating the plan, so - // that the same variables can be applied during apply. - Vars map[string]cty.Value - - // Targets, if non-empty, contains a set of resource address strings that - // identify graph nodes that were selected as targets for plan. - // - // When targets are set, any graph node that is not directly targeted or - // indirectly targeted via dependencies is excluded from the graph. - Targets []string - - // TerraformVersion is the version of Terraform that was used to create - // this plan. - // - // It is not allowed to apply a plan created with a different version of - // Terraform, since the other fields of this structure may be interpreted - // in different ways between versions. - TerraformVersion string - - // ProviderSHA256s is a map giving the SHA256 hashes of the exact binaries - // used as plugins for each provider during plan. - // - // These must match between plan and apply to ensure that the diff is - // correctly interpreted, since different provider versions may have - // different attributes or attribute value constraints. - ProviderSHA256s map[string][]byte - - // Backend is the backend that this plan should use and store data with. 
- Backend *BackendState - - // Destroy indicates that this plan was created for a full destroy operation - Destroy bool - - once sync.Once -} - -func (p *Plan) String() string { - buf := new(bytes.Buffer) - buf.WriteString("DIFF:\n\n") - buf.WriteString(p.Diff.String()) - buf.WriteString("\n\nSTATE:\n\n") - buf.WriteString(p.State.String()) - return buf.String() -} - -func (p *Plan) init() { - p.once.Do(func() { - if p.Diff == nil { - p.Diff = new(Diff) - p.Diff.init() - } - - if p.State == nil { - p.State = new(State) - p.State.init() - } - - if p.Vars == nil { - p.Vars = make(map[string]cty.Value) - } - }) -} - -// The format byte is prefixed into the plan file format so that we have -// the ability in the future to change the file format if we want for any -// reason. -const planFormatMagic = "tfplan" -const planFormatVersion byte = 2 - -// ReadPlan reads a plan structure out of a reader in the format that -// was written by WritePlan. -func ReadPlan(src io.Reader) (*Plan, error) { - return nil, fmt.Errorf("terraform.ReadPlan is no longer in use; use planfile.Open instead") -} - -// WritePlan writes a plan somewhere in a binary format. -func WritePlan(d *Plan, dst io.Writer) error { - return fmt.Errorf("terraform.WritePlan is no longer in use; use planfile.Create instead") -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go deleted file mode 100644 index ed7b7849..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go +++ /dev/null @@ -1,522 +0,0 @@ -package terraform - -import ( - "encoding/json" - "fmt" - "sync" - - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/tfdiags" -) - -var _ providers.Interface = (*MockProvider)(nil) - -// MockProvider implements providers.Interface but mocks out all the -// calls for testing purposes. -type MockProvider struct { - sync.Mutex - - // Anything you want, in case you need to store extra data with the mock. 
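-  // (Tests typically pre-populate the *Response fields of this mock and
-  // then assert on the corresponding *Called and *Request fields afterwards.)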
- Meta interface{} - - GetSchemaCalled bool - GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetSchemaResponse for compatibility with old tests - - PrepareProviderConfigCalled bool - PrepareProviderConfigResponse providers.PrepareProviderConfigResponse - PrepareProviderConfigRequest providers.PrepareProviderConfigRequest - PrepareProviderConfigFn func(providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse - - ValidateResourceTypeConfigCalled bool - ValidateResourceTypeConfigTypeName string - ValidateResourceTypeConfigResponse providers.ValidateResourceTypeConfigResponse - ValidateResourceTypeConfigRequest providers.ValidateResourceTypeConfigRequest - ValidateResourceTypeConfigFn func(providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse - - ValidateDataSourceConfigCalled bool - ValidateDataSourceConfigTypeName string - ValidateDataSourceConfigResponse providers.ValidateDataSourceConfigResponse - ValidateDataSourceConfigRequest providers.ValidateDataSourceConfigRequest - ValidateDataSourceConfigFn func(providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse - - UpgradeResourceStateCalled bool - UpgradeResourceStateTypeName string - UpgradeResourceStateResponse providers.UpgradeResourceStateResponse - UpgradeResourceStateRequest providers.UpgradeResourceStateRequest - UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse - - ConfigureCalled bool - ConfigureResponse providers.ConfigureResponse - ConfigureRequest providers.ConfigureRequest - ConfigureNewFn func(providers.ConfigureRequest) providers.ConfigureResponse // Named ConfigureNewFn so we can still have the legacy ConfigureFn declared below - - StopCalled bool - StopFn func() error - StopResponse error - - ReadResourceCalled bool - ReadResourceResponse providers.ReadResourceResponse - ReadResourceRequest providers.ReadResourceRequest - ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse - - PlanResourceChangeCalled bool - PlanResourceChangeResponse providers.PlanResourceChangeResponse - PlanResourceChangeRequest providers.PlanResourceChangeRequest - PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse - - ApplyResourceChangeCalled bool - ApplyResourceChangeResponse providers.ApplyResourceChangeResponse - ApplyResourceChangeRequest providers.ApplyResourceChangeRequest - ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse - - ImportResourceStateCalled bool - ImportResourceStateResponse providers.ImportResourceStateResponse - ImportResourceStateRequest providers.ImportResourceStateRequest - ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse - // Legacy return type for existing tests, which will be shimmed into an - // ImportResourceStateResponse if set - ImportStateReturn []*InstanceState - - ReadDataSourceCalled bool - ReadDataSourceResponse providers.ReadDataSourceResponse - ReadDataSourceRequest providers.ReadDataSourceRequest - ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse - - CloseCalled bool - CloseError error - - // Legacy callbacks: if these are set, we will shim incoming calls for - // new-style methods to these old-fashioned terraform.ResourceProvider - // mock callbacks, for the benefit of older tests that were written 
against - // the old mock API. - ValidateFn func(c *ResourceConfig) (ws []string, es []error) - ConfigureFn func(c *ResourceConfig) error - DiffFn func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) - ApplyFn func(info *InstanceInfo, s *InstanceState, d *InstanceDiff) (*InstanceState, error) -} - -func (p *MockProvider) GetSchema() providers.GetSchemaResponse { - p.Lock() - defer p.Unlock() - p.GetSchemaCalled = true - return p.getSchema() -} - -func (p *MockProvider) getSchema() providers.GetSchemaResponse { - // This version of getSchema doesn't do any locking, so it's suitable to - // call from other methods of this mock as long as they are already - // holding the lock. - - ret := providers.GetSchemaResponse{ - Provider: providers.Schema{}, - DataSources: map[string]providers.Schema{}, - ResourceTypes: map[string]providers.Schema{}, - } - if p.GetSchemaReturn != nil { - ret.Provider.Block = p.GetSchemaReturn.Provider - ret.ProviderMeta.Block = p.GetSchemaReturn.ProviderMeta - for n, s := range p.GetSchemaReturn.DataSources { - ret.DataSources[n] = providers.Schema{ - Block: s, - } - } - for n, s := range p.GetSchemaReturn.ResourceTypes { - ret.ResourceTypes[n] = providers.Schema{ - Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]), - Block: s, - } - } - } - - return ret -} - -func (p *MockProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse { - p.Lock() - defer p.Unlock() - - p.PrepareProviderConfigCalled = true - p.PrepareProviderConfigRequest = r - if p.PrepareProviderConfigFn != nil { - return p.PrepareProviderConfigFn(r) - } - return p.PrepareProviderConfigResponse -} - -func (p *MockProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateResourceTypeConfigCalled = true - p.ValidateResourceTypeConfigRequest = r - - if p.ValidateFn != nil { - resp := p.getSchema() - schema := resp.Provider.Block - rc := NewResourceConfigShimmed(r.Config, schema) - warns, errs := p.ValidateFn(rc) - ret := providers.ValidateResourceTypeConfigResponse{} - for _, warn := range warns { - ret.Diagnostics = ret.Diagnostics.Append(tfdiags.SimpleWarning(warn)) - } - for _, err := range errs { - ret.Diagnostics = ret.Diagnostics.Append(err) - } - } - if p.ValidateResourceTypeConfigFn != nil { - return p.ValidateResourceTypeConfigFn(r) - } - - return p.ValidateResourceTypeConfigResponse -} - -func (p *MockProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateDataSourceConfigCalled = true - p.ValidateDataSourceConfigRequest = r - - if p.ValidateDataSourceConfigFn != nil { - return p.ValidateDataSourceConfigFn(r) - } - - return p.ValidateDataSourceConfigResponse -} - -func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { - p.Lock() - defer p.Unlock() - - schemas := p.getSchema() - schema := schemas.ResourceTypes[r.TypeName] - schemaType := schema.Block.ImpliedType() - - p.UpgradeResourceStateCalled = true - p.UpgradeResourceStateRequest = r - - if p.UpgradeResourceStateFn != nil { - return p.UpgradeResourceStateFn(r) - } - - resp := p.UpgradeResourceStateResponse - - if resp.UpgradedState == cty.NilVal { - switch { - case r.RawStateFlatmap != nil: - v, err := 
hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType)
-      if err != nil {
-        resp.Diagnostics = resp.Diagnostics.Append(err)
-        return resp
-      }
-      resp.UpgradedState = v
-    case len(r.RawStateJSON) > 0:
-      v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType)
-
-      if err != nil {
-        resp.Diagnostics = resp.Diagnostics.Append(err)
-        return resp
-      }
-      resp.UpgradedState = v
-    }
-  }
-  return resp
-}
-
-func (p *MockProvider) Configure(r providers.ConfigureRequest) providers.ConfigureResponse {
-  p.Lock()
-  defer p.Unlock()
-
-  p.ConfigureCalled = true
-  p.ConfigureRequest = r
-
-  if p.ConfigureFn != nil {
-    resp := p.getSchema()
-    schema := resp.Provider.Block
-    rc := NewResourceConfigShimmed(r.Config, schema)
-    ret := providers.ConfigureResponse{}
-
-    err := p.ConfigureFn(rc)
-    if err != nil {
-      ret.Diagnostics = ret.Diagnostics.Append(err)
-    }
-    return ret
-  }
-  if p.ConfigureNewFn != nil {
-    return p.ConfigureNewFn(r)
-  }
-
-  return p.ConfigureResponse
-}
-
-func (p *MockProvider) Stop() error {
-  // We intentionally don't lock in this one because the whole point of this
-  // method is to be called concurrently with another operation that can
-  // be cancelled. The provider itself is responsible for handling
-  // any concurrency concerns in this case.
-
-  p.StopCalled = true
-  if p.StopFn != nil {
-    return p.StopFn()
-  }
-
-  return p.StopResponse
-}
-
-func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse {
-  p.Lock()
-  defer p.Unlock()
-
-  p.ReadResourceCalled = true
-  p.ReadResourceRequest = r
-
-  if p.ReadResourceFn != nil {
-    return p.ReadResourceFn(r)
-  }
-
-  // make sure the NewState fits the schema
-  newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(p.ReadResourceResponse.NewState)
-  if err != nil {
-    panic(err)
-  }
-  resp := p.ReadResourceResponse
-  resp.NewState = newState
-
-  return resp
-}
-
-func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
-  p.Lock()
-  defer p.Unlock()
-
-  p.PlanResourceChangeCalled = true
-  p.PlanResourceChangeRequest = r
-
-  if p.DiffFn != nil {
-    ps := p.getSchema()
-    if ps.ResourceTypes == nil || ps.ResourceTypes[r.TypeName].Block == nil {
-      return providers.PlanResourceChangeResponse{
-        Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("mock provider has no schema for resource type %s", r.TypeName)),
-      }
-    }
-    schema := ps.ResourceTypes[r.TypeName].Block
-    info := &InstanceInfo{
-      Type: r.TypeName,
-    }
-    priorState := NewInstanceStateShimmedFromValue(r.PriorState, 0)
-    cfg := NewResourceConfigShimmed(r.Config, schema)
-
-    legacyDiff, err := p.DiffFn(info, priorState, cfg)
-
-    var res providers.PlanResourceChangeResponse
-    res.PlannedState = r.ProposedNewState
-    if err != nil {
-      res.Diagnostics = res.Diagnostics.Append(err)
-    }
-    if legacyDiff != nil {
-      newVal, err := legacyDiff.ApplyToValue(r.PriorState, schema)
-      if err != nil {
-        res.Diagnostics = res.Diagnostics.Append(err)
-      }
-
-      res.PlannedState = newVal
-
-      var requiresNew []string
-      for attr, d := range legacyDiff.Attributes {
-        if d.RequiresNew {
-          requiresNew = append(requiresNew, attr)
-        }
-      }
-      requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schema.ImpliedType())
-      if err != nil {
-        res.Diagnostics = res.Diagnostics.Append(err)
-      }
-      res.RequiresReplace = requiresReplace
-    }
-    return res
-  }
-  if p.PlanResourceChangeFn != nil {
-    return p.PlanResourceChangeFn(r)
-  }
-
-  return p.PlanResourceChangeResponse
-}
-
-func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
-  p.Lock()
-  p.ApplyResourceChangeCalled = true
-  p.ApplyResourceChangeRequest = r
-  p.Unlock()
-
-  if p.ApplyFn != nil {
-    // ApplyFn is a special callback fashioned after our old provider
-    // interface, which expected to be given an actual diff rather than
-    // separate old/new values to apply. Therefore we need to approximate
-    // a diff here well enough that _most_ of our legacy ApplyFns in old
-    // tests still see the behavior they are expecting. New tests should
-    // not use this, and should instead use ApplyResourceChangeFn directly.
-    providerSchema := p.getSchema()
-    schema, ok := providerSchema.ResourceTypes[r.TypeName]
-    if !ok {
-      return providers.ApplyResourceChangeResponse{
-        Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("no mocked schema available for resource type %s", r.TypeName)),
-      }
-    }
-
-    info := &InstanceInfo{
-      Type: r.TypeName,
-    }
-
-    priorVal := r.PriorState
-    plannedVal := r.PlannedState
-    priorMap := hcl2shim.FlatmapValueFromHCL2(priorVal)
-    plannedMap := hcl2shim.FlatmapValueFromHCL2(plannedVal)
-    s := NewInstanceStateShimmedFromValue(priorVal, 0)
-    d := &InstanceDiff{
-      Attributes: make(map[string]*ResourceAttrDiff),
-    }
-    if plannedMap == nil { // destroying, then
-      d.Destroy = true
-      // Destroy diffs don't have any attribute diffs
-    } else {
-      if priorMap == nil { // creating, then
-        // We'll just make an empty prior map to make things easier below.
-        priorMap = make(map[string]string)
-      }
-
-      for k, new := range plannedMap {
-        old := priorMap[k]
-        newComputed := false
-        if new == hcl2shim.UnknownVariableValue {
-          new = ""
-          newComputed = true
-        }
-        d.Attributes[k] = &ResourceAttrDiff{
-          Old:         old,
-          New:         new,
-          NewComputed: newComputed,
-          Type:        DiffAttrInput, // not generally used in tests, so just hard-coded
-        }
-      }
-      // Also need any attributes that were removed in "planned"
-      for k, old := range priorMap {
-        if _, ok := plannedMap[k]; ok {
-          continue
-        }
-        d.Attributes[k] = &ResourceAttrDiff{
-          Old:        old,
-          NewRemoved: true,
-          Type:       DiffAttrInput,
-        }
-      }
-    }
-    newState, err := p.ApplyFn(info, s, d)
-    resp := providers.ApplyResourceChangeResponse{}
-    if err != nil {
-      resp.Diagnostics = resp.Diagnostics.Append(err)
-    }
-    var newVal cty.Value
-    if newState != nil {
-      var err error
-      newVal, err = newState.AttrsAsObjectValue(schema.Block.ImpliedType())
-      if err != nil {
-        resp.Diagnostics = resp.Diagnostics.Append(err)
-      }
-    } else {
-      // If apply returned a nil new state then that's the old way to
-      // indicate that the object was destroyed. Our new interface calls
-      // for that to be signalled as a null value.
-      newVal = cty.NullVal(schema.Block.ImpliedType())
-    }
-    resp.NewState = newVal
-
-    return resp
-  }
-  if p.ApplyResourceChangeFn != nil {
-    return p.ApplyResourceChangeFn(r)
-  }
-
-  return p.ApplyResourceChangeResponse
-}
-
-func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) providers.ImportResourceStateResponse {
-  p.Lock()
-  defer p.Unlock()
-
-  if p.ImportStateReturn != nil {
-    for _, is := range p.ImportStateReturn {
-      if is.Attributes == nil {
-        is.Attributes = make(map[string]string)
-      }
-      is.Attributes["id"] = is.ID
-
-      typeName := is.Ephemeral.Type
-      // Use the requested type if the resource has no type of its own.
-      // We still return the empty type, which will error, but this prevents a panic.
- if typeName == "" { - typeName = r.TypeName - } - - schema := p.GetSchemaReturn.ResourceTypes[typeName] - if schema == nil { - panic("no schema found for " + typeName) - } - - private, err := json.Marshal(is.Meta) - if err != nil { - panic(err) - } - - state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType()) - if err != nil { - panic(err) - } - - state, err = schema.CoerceValue(state) - if err != nil { - panic(err) - } - - p.ImportResourceStateResponse.ImportedResources = append( - p.ImportResourceStateResponse.ImportedResources, - providers.ImportedResource{ - TypeName: is.Ephemeral.Type, - State: state, - Private: private, - }) - } - } - - p.ImportResourceStateCalled = true - p.ImportResourceStateRequest = r - if p.ImportResourceStateFn != nil { - return p.ImportResourceStateFn(r) - } - - return p.ImportResourceStateResponse -} - -func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - p.Lock() - defer p.Unlock() - - p.ReadDataSourceCalled = true - p.ReadDataSourceRequest = r - - if p.ReadDataSourceFn != nil { - return p.ReadDataSourceFn(r) - } - - return p.ReadDataSourceResponse -} - -func (p *MockProvider) Close() error { - p.CloseCalled = true - return p.CloseError -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go deleted file mode 100644 index d476e4ea..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go +++ /dev/null @@ -1,153 +0,0 @@ -package terraform - -import ( - "fmt" - "sync" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform/provisioners" -) - -var _ provisioners.Interface = (*MockProvisioner)(nil) - -// MockProvisioner implements provisioners.Interface but mocks out all the -// calls for testing purposes. -type MockProvisioner struct { - sync.Mutex - // Anything you want, in case you need to store extra data with the mock. - Meta interface{} - - GetSchemaCalled bool - GetSchemaResponse provisioners.GetSchemaResponse - - ValidateProvisionerConfigCalled bool - ValidateProvisionerConfigRequest provisioners.ValidateProvisionerConfigRequest - ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse - ValidateProvisionerConfigFn func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse - - ProvisionResourceCalled bool - ProvisionResourceRequest provisioners.ProvisionResourceRequest - ProvisionResourceResponse provisioners.ProvisionResourceResponse - ProvisionResourceFn func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse - - StopCalled bool - StopResponse error - StopFn func() error - - CloseCalled bool - CloseResponse error - CloseFn func() error - - // Legacy callbacks: if these are set, we will shim incoming calls for - // new-style methods to these old-fashioned terraform.ResourceProvider - // mock callbacks, for the benefit of older tests that were written against - // the old mock API. - ApplyFn func(rs *InstanceState, c *ResourceConfig) error -} - -func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse { - p.Lock() - defer p.Unlock() - - p.GetSchemaCalled = true - return p.getSchema() -} - -// getSchema is the implementation of GetSchema, which can be called from other -// methods on MockProvisioner that may already be holding the lock. 
-func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse { - return p.GetSchemaResponse -} - -func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateProvisionerConfigCalled = true - p.ValidateProvisionerConfigRequest = r - if p.ValidateProvisionerConfigFn != nil { - return p.ValidateProvisionerConfigFn(r) - } - return p.ValidateProvisionerConfigResponse -} - -func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse { - p.Lock() - defer p.Unlock() - - p.ProvisionResourceCalled = true - p.ProvisionResourceRequest = r - if p.ApplyFn != nil { - if !r.Config.IsKnown() { - panic(fmt.Sprintf("cannot provision with unknown value: %#v", r.Config)) - } - - schema := p.getSchema() - rc := NewResourceConfigShimmed(r.Config, schema.Provisioner) - connVal := r.Connection - connMap := map[string]string{} - - if !connVal.IsNull() && connVal.IsKnown() { - for it := connVal.ElementIterator(); it.Next(); { - ak, av := it.Element() - name := ak.AsString() - - if !av.IsKnown() || av.IsNull() { - continue - } - - av, _ = convert.Convert(av, cty.String) - connMap[name] = av.AsString() - } - } - - // We no longer pass the full instance state to a provisioner, so we'll - // construct a partial one that should be good enough for what existing - // test mocks need. - is := &InstanceState{ - Ephemeral: EphemeralState{ - ConnInfo: connMap, - }, - } - var resp provisioners.ProvisionResourceResponse - err := p.ApplyFn(is, rc) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - } - return resp - } - if p.ProvisionResourceFn != nil { - fn := p.ProvisionResourceFn - return fn(r) - } - - return p.ProvisionResourceResponse -} - -func (p *MockProvisioner) Stop() error { - // We intentionally don't lock in this one because the whole point of this - // method is to be called concurrently with another operation that can - // be cancelled. The provisioner itself is responsible for handling - // any concurrency concerns in this case. - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopResponse -} - -func (p *MockProvisioner) Close() error { - p.Lock() - defer p.Unlock() - - p.CloseCalled = true - if p.CloseFn != nil { - return p.CloseFn() - } - - return p.CloseResponse -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go deleted file mode 100644 index fcf28aa6..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource.go +++ /dev/null @@ -1,551 +0,0 @@ -package terraform - -import ( - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/mitchellh/copystructure" - "github.com/mitchellh/reflectwalk" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/configs/hcl2shim" -) - -// Resource is a legacy way to identify a particular resource instance. -// -// New code should use addrs.ResourceInstance instead. This is still here -// only for codepaths that haven't been updated yet. -type Resource struct { - // These are all used by the new EvalNode stuff. 
- Name string - Type string - CountIndex int - - // These aren't really used anymore anywhere, but we keep them around - // since we haven't done a proper cleanup yet. - Id string - Info *InstanceInfo - Config *ResourceConfig - Dependencies []string - Diff *InstanceDiff - Provider ResourceProvider - State *InstanceState - Flags ResourceFlag -} - -// NewResource constructs a legacy Resource object from an -// addrs.ResourceInstance value. -// -// This is provided to shim to old codepaths that haven't been updated away -// from this type yet. Since this old type is not able to represent instances -// that have string keys, this function will panic if given a resource address -// that has a string key. -func NewResource(addr addrs.ResourceInstance) *Resource { - ret := &Resource{ - Name: addr.Resource.Name, - Type: addr.Resource.Type, - } - - if addr.Key != addrs.NoKey { - switch tk := addr.Key.(type) { - case addrs.IntKey: - ret.CountIndex = int(tk) - default: - panic(fmt.Errorf("resource instance with key %#v is not supported", addr.Key)) - } - } - - return ret -} - -// ResourceKind specifies what kind of instance we're working with, whether -// its a primary instance, a tainted instance, or an orphan. -type ResourceFlag byte - -// InstanceInfo is used to hold information about the instance and/or -// resource being modified. -type InstanceInfo struct { - // Id is a unique name to represent this instance. This is not related - // to InstanceState.ID in any way. - Id string - - // ModulePath is the complete path of the module containing this - // instance. - ModulePath []string - - // Type is the resource type of this instance - Type string - - // uniqueExtra is an internal field that can be populated to supply - // extra metadata that is used to identify a unique instance in - // the graph walk. This will be appended to HumanID when uniqueId - // is called. - uniqueExtra string -} - -// NewInstanceInfo constructs an InstanceInfo from an addrs.AbsResourceInstance. -// -// InstanceInfo is a legacy type, and uses of it should be gradually replaced -// by direct use of addrs.AbsResource or addrs.AbsResourceInstance as -// appropriate. -// -// The legacy InstanceInfo type cannot represent module instances with instance -// keys, so this function will panic if given such a path. Uses of this type -// should all be removed or replaced before implementing "count" and "for_each" -// arguments on modules in order to avoid such panics. -// -// This legacy type also cannot represent resource instances with string -// instance keys. It will panic if the given key is not either NoKey or an -// IntKey. -func NewInstanceInfo(addr addrs.AbsResourceInstance) *InstanceInfo { - // We need an old-style []string module path for InstanceInfo. - path := make([]string, len(addr.Module)) - for i, step := range addr.Module { - if step.InstanceKey != addrs.NoKey { - panic("NewInstanceInfo cannot convert module instance with key") - } - path[i] = step.Name - } - - // This is a funny old meaning of "id" that is no longer current. It should - // not be used for anything users might see. Note that it does not include - // a representation of the resource mode, and so it's impossible to - // determine from an InstanceInfo alone whether it is a managed or data - // resource that is being referred to. - id := fmt.Sprintf("%s.%s", addr.Resource.Resource.Type, addr.Resource.Resource.Name) - if addr.Resource.Resource.Mode == addrs.DataResourceMode { - id = "data." 
+ id - } - if addr.Resource.Key != addrs.NoKey { - switch k := addr.Resource.Key.(type) { - case addrs.IntKey: - id = id + fmt.Sprintf(".%d", int(k)) - default: - panic(fmt.Sprintf("NewInstanceInfo cannot convert resource instance with %T instance key", addr.Resource.Key)) - } - } - - return &InstanceInfo{ - Id: id, - ModulePath: path, - Type: addr.Resource.Resource.Type, - } -} - -// ResourceAddress returns the address of the resource that the receiver is describing. -func (i *InstanceInfo) ResourceAddress() *ResourceAddress { - // GROSS: for tainted and deposed instances, their status gets appended - // to i.Id to create a unique id for the graph node. Historically these - // ids were displayed to the user, so it's designed to be human-readable: - // "aws_instance.bar.0 (deposed #0)" - // - // So here we detect such suffixes and try to interpret them back to - // their original meaning so we can then produce a ResourceAddress - // with a suitable InstanceType. - id := i.Id - instanceType := TypeInvalid - if idx := strings.Index(id, " ("); idx != -1 { - remain := id[idx:] - id = id[:idx] - - switch { - case strings.Contains(remain, "tainted"): - instanceType = TypeTainted - case strings.Contains(remain, "deposed"): - instanceType = TypeDeposed - } - } - - addr, err := parseResourceAddressInternal(id) - if err != nil { - // should never happen, since that would indicate a bug in the - // code that constructed this InstanceInfo. - panic(fmt.Errorf("InstanceInfo has invalid Id %s", id)) - } - if len(i.ModulePath) > 1 { - addr.Path = i.ModulePath[1:] // trim off "root" prefix, which is implied - } - if instanceType != TypeInvalid { - addr.InstanceTypeSet = true - addr.InstanceType = instanceType - } - return addr -} - -// ResourceConfig is a legacy type that was formerly used to represent -// interpolatable configuration blocks. It is now only used to shim to old -// APIs that still use this type, via NewResourceConfigShimmed. -type ResourceConfig struct { - ComputedKeys []string - Raw map[string]interface{} - Config map[string]interface{} - - raw *config.RawConfig -} - -// NewResourceConfig creates a new ResourceConfig from a config.RawConfig. -func NewResourceConfig(c *config.RawConfig) *ResourceConfig { - result := &ResourceConfig{raw: c} - result.interpolateForce() - return result -} - -// NewResourceConfigRaw constructs a ResourceConfig whose content is exactly -// the given value. -// -// The given value may contain hcl2shim.UnknownVariableValue to signal that -// something is computed, but it must not contain unprocessed interpolation -// sequences as we might've seen in Terraform v0.11 and prior. -func NewResourceConfigRaw(raw map[string]interface{}) *ResourceConfig { - v := hcl2shim.HCL2ValueFromConfigValue(raw) - - // This is a little weird but we round-trip the value through the hcl2shim - // package here for two reasons: firstly, because that reduces the risk - // of it including something unlike what NewResourceConfigShimmed would - // produce, and secondly because it creates a copy of "raw" just in case - // something is relying on the fact that in the old world the raw and - // config maps were always distinct, and thus you could in principle mutate - // one without affecting the other. (I sure hope nobody was doing that, though!) 
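-  // (The round trip also normalizes unknowns: HCL2ValueFromConfigValue maps
-  // hcl2shim.UnknownVariableValue placeholders to cty unknown values, and
-  // ConfigValueFromHCL2 converts them back to the same placeholder.)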
- cfg := hcl2shim.ConfigValueFromHCL2(v).(map[string]interface{}) - - return &ResourceConfig{ - Raw: raw, - Config: cfg, - - ComputedKeys: newResourceConfigShimmedComputedKeys(v, ""), - } -} - -// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy -// ResourceConfig object, so that it can be passed to older APIs that expect -// this wrapping. -// -// The returned ResourceConfig is already interpolated and cannot be -// re-interpolated. It is, therefore, useful only to functions that expect -// an already-populated ResourceConfig which they then treat as read-only. -// -// If the given value is not of an object type that conforms to the given -// schema then this function will panic. -func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig { - if !val.Type().IsObjectType() { - panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type())) - } - ret := &ResourceConfig{} - - legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema) - if legacyVal != nil { - ret.Config = legacyVal - - // Now we need to walk through our structure and find any unknown values, - // producing the separate list ComputedKeys to represent these. We use the - // schema here so that we can preserve the expected invariant - // that an attribute is always either wholly known or wholly unknown, while - // a child block can be partially unknown. - ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "") - } else { - ret.Config = make(map[string]interface{}) - } - ret.Raw = ret.Config - - return ret -} - -// Record the any config values in ComputedKeys. This field had been unused in -// helper/schema, but in the new protocol we're using this so that the SDK can -// now handle having an unknown collection. The legacy diff code doesn't -// properly handle the unknown, because it can't be expressed in the same way -// between the config and diff. -func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string { - var ret []string - ty := val.Type() - - if val.IsNull() { - return ret - } - - if !val.IsKnown() { - // we shouldn't have an entirely unknown resource, but prevent empty - // strings just in case - if len(path) > 0 { - ret = append(ret, path) - } - return ret - } - - if path != "" { - path += "." - } - switch { - case ty.IsListType(), ty.IsTupleType(), ty.IsSetType(): - i := 0 - for it := val.ElementIterator(); it.Next(); i++ { - _, subVal := it.Element() - keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i)) - ret = append(ret, keys...) - } - - case ty.IsMapType(), ty.IsObjectType(): - for it := val.ElementIterator(); it.Next(); { - subK, subVal := it.Element() - keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString())) - ret = append(ret, keys...) - } - } - - return ret -} - -// DeepCopy performs a deep copy of the configuration. This makes it safe -// to modify any of the structures that are part of the resource config without -// affecting the original configuration. -func (c *ResourceConfig) DeepCopy() *ResourceConfig { - // DeepCopying a nil should return a nil to avoid panics - if c == nil { - return nil - } - - // Copy, this will copy all the exported attributes - copy, err := copystructure.Config{Lock: true}.Copy(c) - if err != nil { - panic(err) - } - - // Force the type - result := copy.(*ResourceConfig) - - return result -} - -// Equal checks the equality of two resource configs. 
-func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool {
-  // If either is nil, then they're only equal if they're both nil
-  if c == nil || c2 == nil {
-    return c == c2
-  }
-
-  // Sort the computed keys so they're deterministic
-  sort.Strings(c.ComputedKeys)
-  sort.Strings(c2.ComputedKeys)
-
-  // Two resource configs are equal if their exported properties are equal.
-  // We don't compare "raw" because it is never used again after
-  // initialization and for all intents and purposes they are equal
-  // if the exported properties are equal.
-  check := [][2]interface{}{
-    {c.ComputedKeys, c2.ComputedKeys},
-    {c.Raw, c2.Raw},
-    {c.Config, c2.Config},
-  }
-  for _, pair := range check {
-    if !reflect.DeepEqual(pair[0], pair[1]) {
-      return false
-    }
-  }
-
-  return true
-}
-
-// CheckSet checks that the given list of configuration keys is
-// properly set. If not, errors are returned for each unset key.
-//
-// This is useful to be called in the Validate method of a ResourceProvider.
-func (c *ResourceConfig) CheckSet(keys []string) []error {
-  var errs []error
-
-  for _, k := range keys {
-    if !c.IsSet(k) {
-      errs = append(errs, fmt.Errorf("%s must be set", k))
-    }
-  }
-
-  return errs
-}
-
-// Get looks up a configuration value by key and returns the value.
-//
-// The second return value is true if the get was successful. Get will
-// return the raw value if the key is computed, so you should pair this
-// with IsComputed.
-func (c *ResourceConfig) Get(k string) (interface{}, bool) {
-  // We aim to get a value from the configuration. If it is computed,
-  // then we return the pure raw value.
-  source := c.Config
-  if c.IsComputed(k) {
-    source = c.Raw
-  }
-
-  return c.get(k, source)
-}
-
-// GetRaw looks up a configuration value by key and returns the value,
-// from the raw, uninterpolated config.
-//
-// The second return value is true if the get was successful. Get will
-// not succeed if the value is being computed.
-func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) {
-  return c.get(k, c.Raw)
-}
-
-// IsComputed returns whether the given key is computed or not.
-func (c *ResourceConfig) IsComputed(k string) bool {
-  // Check the config to see whether we get a computed
-  // value out of it.
-  v, ok := c.get(k, c.Config)
-  if !ok {
-    return false
-  }
-
-  // If value is nil, then it isn't computed
-  if v == nil {
-    return false
-  }
-
-  // Test if the value contains an unknown value
-  var w unknownCheckWalker
-  if err := reflectwalk.Walk(v, &w); err != nil {
-    panic(err)
-  }
-
-  return w.Unknown
-}
-
-// IsSet checks if the key in the configuration is set. A key is set if
-// it has a value or the value is being computed (is unknown currently).
-//
-// This function should be used rather than checking the keys of the
-// raw configuration itself, since a key may be omitted from the raw
-// configuration if it is being computed.
-func (c *ResourceConfig) IsSet(k string) bool {
-  if c == nil {
-    return false
-  }
-
-  if c.IsComputed(k) {
-    return true
-  }
-
-  if _, ok := c.Get(k); ok {
-    return true
-  }
-
-  return false
-}
-
-func (c *ResourceConfig) get(
-  k string, raw map[string]interface{}) (interface{}, bool) {
-  parts := strings.Split(k, ".")
-  if len(parts) == 1 && parts[0] == "" {
-    parts = nil
-  }
-
-  var current interface{} = raw
-  var previous interface{} = nil
-  for i, part := range parts {
-    if current == nil {
-      return nil, false
-    }
-
-    cv := reflect.ValueOf(current)
-    switch cv.Kind() {
-    case reflect.Map:
-      previous = current
-      v := cv.MapIndex(reflect.ValueOf(part))
-      if !v.IsValid() {
-        if i > 0 && i != (len(parts)-1) {
-          tryKey := strings.Join(parts[i:], ".")
-          v := cv.MapIndex(reflect.ValueOf(tryKey))
-          if !v.IsValid() {
-            return nil, false
-          }
-
-          return v.Interface(), true
-        }
-
-        return nil, false
-      }
-
-      current = v.Interface()
-    case reflect.Slice:
-      previous = current
-
-      if part == "#" {
-        // If any value in a list is computed, this whole thing
-        // is computed and we can't read any part of it.
-        for i := 0; i < cv.Len(); i++ {
-          if v := cv.Index(i).Interface(); v == hcl2shim.UnknownVariableValue {
-            return v, true
-          }
-        }
-
-        current = cv.Len()
-      } else {
-        i, err := strconv.ParseInt(part, 0, 0)
-        if err != nil {
-          return nil, false
-        }
-        if int(i) < 0 || int(i) >= cv.Len() {
-          return nil, false
-        }
-        current = cv.Index(int(i)).Interface()
-      }
-    case reflect.String:
-      // This happens when map keys contain "." and have a common
-      // prefix so were split as path components above.
-      actualKey := strings.Join(parts[i-1:], ".")
-      if prevMap, ok := previous.(map[string]interface{}); ok {
-        v, ok := prevMap[actualKey]
-        return v, ok
-      }
-
-      return nil, false
-    default:
-      panic(fmt.Sprintf("Unknown kind: %s", cv.Kind()))
-    }
-  }
-
-  return current, true
-}
-
-// interpolateForce is a temporary thing. We want to get rid of interpolate
-// above and likewise this, but it can only be done after the f-ast-graph
-// refactor is complete.
-func (c *ResourceConfig) interpolateForce() {
-  if c.raw == nil {
-    // If we don't have a lowercase "raw" but we _do_ have the uppercase
-    // Raw populated then this indicates that we're receiving a shim
-    // ResourceConfig created by NewResourceConfigShimmed, which is already
-    // fully evaluated and thus this function doesn't need to do anything.
-    if c.Raw != nil {
-      return
-    }
-
-    var err error
-    c.raw, err = config.NewRawConfig(make(map[string]interface{}))
-    if err != nil {
-      panic(err)
-    }
-  }
-
-  c.ComputedKeys = c.raw.UnknownKeys()
-  c.Raw = c.raw.RawMap()
-  c.Config = c.raw.Config()
-}
-
-// unknownCheckWalker is a reflectwalk walker that looks for the unknown
-// value placeholder, hcl2shim.UnknownVariableValue.
-type unknownCheckWalker struct {
-  Unknown bool
-}
-
-func (w *unknownCheckWalker) Primitive(v reflect.Value) error {
-  if v.Interface() == hcl2shim.UnknownVariableValue {
-    w.Unknown = true
-  }
-
-  return nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
deleted file mode 100644
index ca833fe1..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
+++ /dev/null
@@ -1,618 +0,0 @@
-package terraform
-
-import (
-  "fmt"
-  "reflect"
-  "regexp"
-  "strconv"
-  "strings"
-
-  "github.com/hashicorp/terraform/addrs"
-  "github.com/hashicorp/terraform/configs"
-)
-
-// ResourceAddress is a way of identifying an individual resource (or,
-// eventually, a subset of resources) within the state. It is used for Targets.
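-// (In its string form this is the familiar UI syntax, e.g.
-// "module.network.aws_instance.web[0]" or "data.aws_ami.base".)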
-type ResourceAddress struct { - // Addresses a resource falling somewhere in the module path - // When specified alone, addresses all resources within a module path - Path []string - - // Addresses a specific resource that occurs in a list - Index int - - InstanceType InstanceType - InstanceTypeSet bool - Name string - Type string - Mode ResourceMode // significant only if InstanceTypeSet -} - -// Copy returns a copy of this ResourceAddress -func (r *ResourceAddress) Copy() *ResourceAddress { - if r == nil { - return nil - } - - n := &ResourceAddress{ - Path: make([]string, 0, len(r.Path)), - Index: r.Index, - InstanceType: r.InstanceType, - Name: r.Name, - Type: r.Type, - Mode: r.Mode, - } - - n.Path = append(n.Path, r.Path...) - - return n -} - -// String outputs the address that parses into this address. -func (r *ResourceAddress) String() string { - var result []string - for _, p := range r.Path { - result = append(result, "module", p) - } - - switch r.Mode { - case ManagedResourceMode: - // nothing to do - case DataResourceMode: - result = append(result, "data") - default: - panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) - } - - if r.Type != "" { - result = append(result, r.Type) - } - - if r.Name != "" { - name := r.Name - if r.InstanceTypeSet { - switch r.InstanceType { - case TypePrimary: - name += ".primary" - case TypeDeposed: - name += ".deposed" - case TypeTainted: - name += ".tainted" - } - } - - if r.Index >= 0 { - name += fmt.Sprintf("[%d]", r.Index) - } - result = append(result, name) - } - - return strings.Join(result, ".") -} - -// HasResourceSpec returns true if the address has a resource spec, as -// defined in the documentation: -// https://www.terraform.io/docs/internals/resource-addressing.html -// In particular, this returns false if the address contains only -// a module path, thus addressing the entire module. -func (r *ResourceAddress) HasResourceSpec() bool { - return r.Type != "" && r.Name != "" -} - -// WholeModuleAddress returns the resource address that refers to all -// resources in the same module as the receiver address. -func (r *ResourceAddress) WholeModuleAddress() *ResourceAddress { - return &ResourceAddress{ - Path: r.Path, - Index: -1, - InstanceTypeSet: false, - } -} - -// MatchesResourceConfig returns true if the receiver matches the given -// configuration resource within the given _static_ module path. Note that -// the module path in a resource address is a _dynamic_ module path, and -// multiple dynamic resource paths may map to a single static path if -// count and for_each are in use on module calls. -// -// Since resource configuration blocks represent all of the instances of -// a multi-instance resource, the index of the address (if any) is not -// considered. -func (r *ResourceAddress) MatchesResourceConfig(path addrs.Module, rc *configs.Resource) bool { - if r.HasResourceSpec() { - // FIXME: Some ugliness while we are between worlds. Functionality - // in "addrs" should eventually replace this ResourceAddress idea - // completely, but for now we'll need to translate to the old - // way of representing resource modes. 
- switch r.Mode { - case ManagedResourceMode: - if rc.Mode != addrs.ManagedResourceMode { - return false - } - case DataResourceMode: - if rc.Mode != addrs.DataResourceMode { - return false - } - } - if r.Type != rc.Type || r.Name != rc.Name { - return false - } - } - - addrPath := r.Path - - // normalize - if len(addrPath) == 0 { - addrPath = nil - } - if len(path) == 0 { - path = nil - } - rawPath := []string(path) - return reflect.DeepEqual(addrPath, rawPath) -} - -// stateId returns the ID that this resource should be entered with -// in the state. This is also used for diffs. In the future, we'd like to -// move away from this string field so I don't export this. -func (r *ResourceAddress) stateId() string { - result := fmt.Sprintf("%s.%s", r.Type, r.Name) - switch r.Mode { - case ManagedResourceMode: - // Done - case DataResourceMode: - result = fmt.Sprintf("data.%s", result) - default: - panic(fmt.Errorf("unknown resource mode: %s", r.Mode)) - } - if r.Index >= 0 { - result += fmt.Sprintf(".%d", r.Index) - } - - return result -} - -// parseResourceAddressInternal parses the somewhat bespoke resource -// identifier used in states and diffs, such as "instance.name.0". -func parseResourceAddressInternal(s string) (*ResourceAddress, error) { - // Split based on ".". Every resource address should have at least two - // elements (type and name). - parts := strings.Split(s, ".") - if len(parts) < 2 || len(parts) > 4 { - return nil, fmt.Errorf("Invalid internal resource address format: %s", s) - } - - // Data resource if we have at least 3 parts and the first one is data - mode := ManagedResourceMode - if len(parts) > 2 && parts[0] == "data" { - mode = DataResourceMode - parts = parts[1:] - } - - // If we're not a data resource and we have more than 3, then it is an error - if len(parts) > 3 && mode != DataResourceMode { - return nil, fmt.Errorf("Invalid internal resource address format: %s", s) - } - - // Build the parts of the resource address that are guaranteed to exist - addr := &ResourceAddress{ - Type: parts[0], - Name: parts[1], - Index: -1, - InstanceType: TypePrimary, - Mode: mode, - } - - // If we have more parts, then we have an index. Parse that. - if len(parts) > 2 { - idx, err := strconv.ParseInt(parts[2], 0, 0) - if err != nil { - return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err) - } - - addr.Index = int(idx) - } - - return addr, nil -} - -func ParseResourceAddress(s string) (*ResourceAddress, error) { - matches, err := tokenizeResourceAddress(s) - if err != nil { - return nil, err - } - mode := ManagedResourceMode - if matches["data_prefix"] != "" { - mode = DataResourceMode - } - resourceIndex, err := ParseResourceIndex(matches["index"]) - if err != nil { - return nil, err - } - instanceType, err := ParseInstanceType(matches["instance_type"]) - if err != nil { - return nil, err - } - path := ParseResourcePath(matches["path"]) - - // not allowed to say "data." without a type following - if mode == DataResourceMode && matches["type"] == "" { - return nil, fmt.Errorf( - "invalid resource address %q: must target specific data instance", - s, - ) - } - - return &ResourceAddress{ - Path: path, - Index: resourceIndex, - InstanceType: instanceType, - InstanceTypeSet: matches["instance_type"] != "", - Name: matches["name"], - Type: matches["type"], - Mode: mode, - }, nil -} - -// ParseResourceAddressForInstanceDiff creates a ResourceAddress for a -// resource name as described in a module diff. 
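-// (For example, the internal key "data.aws_ami.base.0" parses to the
-// address rendered as "data.aws_ami.base[0]" by the String method.)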
-//
-// For historical reasons a different addressing format is used in this
-// context. The internal format should not be shown in the UI and instead
-// this function should be used to translate to a ResourceAddress and
-// then, where appropriate, use the String method to produce a canonical
-// resource address string for display in the UI.
-//
-// The given path slice must be empty (or nil) for the root module, and
-// otherwise consist of a sequence of module names traversing down into
-// the module tree. If a non-nil path is provided, the caller must not
-// modify its underlying array after passing it to this function.
-func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAddress, error) {
-  addr, err := parseResourceAddressInternal(key)
-  if err != nil {
-    return nil, err
-  }
-  addr.Path = path
-  return addr, nil
-}
-
-// NewLegacyResourceAddress creates a ResourceAddress from a new-style
-// addrs.AbsResource value.
-//
-// This is provided for shimming purposes so that we can still easily call into
-// older functions that expect the ResourceAddress type.
-func NewLegacyResourceAddress(addr addrs.AbsResource) *ResourceAddress {
-  ret := &ResourceAddress{
-    Type: addr.Resource.Type,
-    Name: addr.Resource.Name,
-  }
-
-  switch addr.Resource.Mode {
-  case addrs.ManagedResourceMode:
-    ret.Mode = ManagedResourceMode
-  case addrs.DataResourceMode:
-    ret.Mode = DataResourceMode
-  default:
-    panic(fmt.Errorf("cannot shim %s to legacy ResourceMode value", addr.Resource.Mode))
-  }
-
-  path := make([]string, len(addr.Module))
-  for i, step := range addr.Module {
-    if step.InstanceKey != addrs.NoKey {
-      // At the time of writing this can't happen because we don't
-      // yet generate keyed module instances. This legacy codepath must
-      // be removed before we can support "count" and "for_each" for
-      // modules.
-      panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey))
-    }
-
-    path[i] = step.Name
-  }
-  ret.Path = path
-  ret.Index = -1
-
-  return ret
-}
-
-// NewLegacyResourceInstanceAddress creates a ResourceAddress from a new-style
-// addrs.AbsResourceInstance value.
-//
-// This is provided for shimming purposes so that we can still easily call into
-// older functions that expect the ResourceAddress type.
-func NewLegacyResourceInstanceAddress(addr addrs.AbsResourceInstance) *ResourceAddress {
-  ret := &ResourceAddress{
-    Type: addr.Resource.Resource.Type,
-    Name: addr.Resource.Resource.Name,
-  }
-
-  switch addr.Resource.Resource.Mode {
-  case addrs.ManagedResourceMode:
-    ret.Mode = ManagedResourceMode
-  case addrs.DataResourceMode:
-    ret.Mode = DataResourceMode
-  default:
-    panic(fmt.Errorf("cannot shim %s to legacy ResourceMode value", addr.Resource.Resource.Mode))
-  }
-
-  path := make([]string, len(addr.Module))
-  for i, step := range addr.Module {
-    if step.InstanceKey != addrs.NoKey {
-      // At the time of writing this can't happen because we don't
-      // yet generate keyed module instances. This legacy codepath must
-      // be removed before we can support "count" and "for_each" for
-      // modules.
- panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey)) - } - - path[i] = step.Name - } - ret.Path = path - - if addr.Resource.Key == addrs.NoKey { - ret.Index = -1 - } else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok { - ret.Index = int(ik) - } else if _, ok := addr.Resource.Key.(addrs.StringKey); ok { - ret.Index = -1 - } else { - panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key)) - } - - return ret -} - -// AbsResourceInstanceAddr converts the receiver, a legacy resource address, to -// the new resource address type addrs.AbsResourceInstance. -// -// This method can be used only on an address that has a resource specification. -// It will panic if called on a module-path-only ResourceAddress. Use -// method HasResourceSpec to check before calling, in contexts where it is -// unclear. -// -// addrs.AbsResourceInstance does not represent the "tainted" and "deposed" -// states, and so if these are present on the receiver then they are discarded. -// -// This is provided for shimming purposes so that we can easily adapt functions -// that are returning the legacy ResourceAddress type, for situations where -// the new type is required. -func (addr *ResourceAddress) AbsResourceInstanceAddr() addrs.AbsResourceInstance { - if !addr.HasResourceSpec() { - panic("AbsResourceInstanceAddr called on ResourceAddress with no resource spec") - } - - ret := addrs.AbsResourceInstance{ - Module: addr.ModuleInstanceAddr(), - Resource: addrs.ResourceInstance{ - Resource: addrs.Resource{ - Type: addr.Type, - Name: addr.Name, - }, - }, - } - - switch addr.Mode { - case ManagedResourceMode: - ret.Resource.Resource.Mode = addrs.ManagedResourceMode - case DataResourceMode: - ret.Resource.Resource.Mode = addrs.DataResourceMode - default: - panic(fmt.Errorf("cannot shim %s to addrs.ResourceMode value", addr.Mode)) - } - - if addr.Index != -1 { - ret.Resource.Key = addrs.IntKey(addr.Index) - } - - return ret -} - -// ModuleInstanceAddr returns the module path portion of the receiver as a -// addrs.ModuleInstance value. -func (addr *ResourceAddress) ModuleInstanceAddr() addrs.ModuleInstance { - path := make(addrs.ModuleInstance, len(addr.Path)) - for i, name := range addr.Path { - path[i] = addrs.ModuleInstanceStep{Name: name} - } - return path -} - -// Contains returns true if and only if the given node is contained within -// the receiver. -// -// Containment is defined in terms of the module and resource heirarchy: -// a resource is contained within its module and any ancestor modules, -// an indexed resource instance is contained with the unindexed resource, etc. -func (addr *ResourceAddress) Contains(other *ResourceAddress) bool { - ourPath := addr.Path - givenPath := other.Path - if len(givenPath) < len(ourPath) { - return false - } - for i := range ourPath { - if ourPath[i] != givenPath[i] { - return false - } - } - - // If the receiver is a whole-module address then the path prefix - // matching is all we need. - if !addr.HasResourceSpec() { - return true - } - - if addr.Type != other.Type || addr.Name != other.Name || addr.Mode != other.Mode { - return false - } - - if addr.Index != -1 && addr.Index != other.Index { - return false - } - - if addr.InstanceTypeSet && (addr.InstanceTypeSet != other.InstanceTypeSet || addr.InstanceType != other.InstanceType) { - return false - } - - return true -} - -// Equals returns true if the receiver matches the given address. 
-// Equals returns true if the receiver matches the given address.
-//
-// The name of this method is a misnomer, since it doesn't test for exact
-// equality. Instead, it tests that the _specified_ parts of each
-// address match, treating any unspecified parts as wildcards.
-//
-// See also Contains, which takes a more hierarchical approach to comparing
-// addresses.
-func (addr *ResourceAddress) Equals(raw interface{}) bool {
- other, ok := raw.(*ResourceAddress)
- if !ok {
- return false
- }
-
- pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 ||
- reflect.DeepEqual(addr.Path, other.Path)
-
- indexMatch := addr.Index == -1 ||
- other.Index == -1 ||
- addr.Index == other.Index
-
- nameMatch := addr.Name == "" ||
- other.Name == "" ||
- addr.Name == other.Name
-
- typeMatch := addr.Type == "" ||
- other.Type == "" ||
- addr.Type == other.Type
-
- // mode is significant only when type is set
- modeMatch := addr.Type == "" ||
- other.Type == "" ||
- addr.Mode == other.Mode
-
- return pathMatch &&
- indexMatch &&
- addr.InstanceType == other.InstanceType &&
- nameMatch &&
- typeMatch &&
- modeMatch
-}
-
-// Less returns true if and only if the receiver should be sorted before
-// the given address when presenting a list of resource addresses to
-// an end-user.
-//
-// This sort uses lexicographic sorting for most components, but uses
-// numeric sort for indices, thus causing index 10 to sort after
-// index 9, rather than after index 1.
-func (addr *ResourceAddress) Less(other *ResourceAddress) bool {
-
- switch {
-
- case len(addr.Path) != len(other.Path):
- return len(addr.Path) < len(other.Path)
-
- case !reflect.DeepEqual(addr.Path, other.Path):
- // If the two paths are the same length but don't match, we'll just
- // cheat and compare the string forms since it's easier than
- // comparing all of the path segments in turn, and lexicographic
- // comparison is correct for the module path portion.
- addrStr := addr.String()
- otherStr := other.String()
- return addrStr < otherStr
-
- case addr.Mode != other.Mode:
- return addr.Mode == DataResourceMode
-
- case addr.Type != other.Type:
- return addr.Type < other.Type
-
- case addr.Name != other.Name:
- return addr.Name < other.Name
-
- case addr.Index != other.Index:
- // Since "Index" is -1 for an un-indexed address, this also conveniently
- // sorts unindexed addresses before indexed ones, should they both
- // appear for some reason.
- return addr.Index < other.Index
-
- case addr.InstanceTypeSet != other.InstanceTypeSet:
- return !addr.InstanceTypeSet
-
- case addr.InstanceType != other.InstanceType:
- // InstanceType is actually an enum, so this is just an arbitrary
- // sort based on the enum numeric values, and thus not particularly
- // meaningful.
- return addr.InstanceType < other.InstanceType
-
- default:
- return false
-
- }
-}
-
-func ParseResourceIndex(s string) (int, error) {
- if s == "" {
- return -1, nil
- }
- return strconv.Atoi(s)
-}
-
-func ParseResourcePath(s string) []string {
- if s == "" {
- return nil
- }
- parts := strings.Split(s, ".")
- path := make([]string, 0, len(parts))
- for _, s := range parts {
- // Due to the limitations of the regexp match below, the path match has
- // some noise in it we have to filter out :|
- if s == "" || s == "module" {
- continue
- }
- path = append(path, s)
- }
- return path
-}
-
-func ParseInstanceType(s string) (InstanceType, error) {
- switch s {
- case "", "primary":
- return TypePrimary, nil
- case "deposed":
- return TypeDeposed, nil
- case "tainted":
- return TypeTainted, nil
- default:
- return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s)
- }
-}
-
-func tokenizeResourceAddress(s string) (map[string]string, error) {
- // Example of portions of the regexp below using the
- // string "aws_instance.web.tainted[1]"
- re := regexp.MustCompile(`\A` +
- // "module.foo.module.bar" (optional)
- `(?P<path>(?:module\.(?P<module_name>[^.]+)\.?)*)` +
- // possibly "data.", if targeting is a data resource
- `(?P<data_prefix>(?:data\.)?)` +
- // "aws_instance.web" (optional when module path specified)
- `(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
- // "tainted" (optional, omission implies: "primary")
- `(?:\.(?P<instance_type>\w+))?` +
- // "1" (optional, omission implies: "0")
- `(?:\[(?P<index>\d+)\])?` +
- `\z`)
-
- groupNames := re.SubexpNames()
- rawMatches := re.FindAllStringSubmatch(s, -1)
- if len(rawMatches) != 1 {
- return nil, fmt.Errorf("invalid resource address %q", s)
- }
-
- matches := make(map[string]string)
- for i, m := range rawMatches[0] {
- matches[groupNames[i]] = m
- }
-
- return matches, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_mode.go b/vendor/github.com/hashicorp/terraform/terraform/resource_mode.go
deleted file mode 100644
index c83643a6..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_mode.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package terraform
-
-//go:generate go run golang.org/x/tools/cmd/stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go
-
-// ResourceMode is deprecated, use addrs.ResourceMode instead.
-// It has been preserved for backwards compatibility.
-type ResourceMode int
-
-const (
- ManagedResourceMode ResourceMode = iota
- DataResourceMode
-)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/terraform/resource_mode_string.go
deleted file mode 100644
index ba84346a..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_mode_string.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT.
-
-package terraform
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
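// The guard below relies on a compile-time trick: each constant indexes a
// one-element array at (value - expected position), so if the enum values
// ever drift the index becomes negative or out of range and the package
// stops compiling. A standalone sketch of the same pattern (illustrative
// only, not part of the generated file):
//
//    type Mode int
//
//    const (
//        A Mode = iota
//        B
//    )
//
//    func _() {
//        var x [1]struct{}
//        _ = x[A-0] // compile error if A is no longer 0
//        _ = x[B-1] // compile error if B is no longer 1
//    }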
- var x [1]struct{} - _ = x[ManagedResourceMode-0] - _ = x[DataResourceMode-1] -} - -const _ResourceMode_name = "ManagedResourceModeDataResourceMode" - -var _ResourceMode_index = [...]uint8{0, 19, 35} - -func (i ResourceMode) String() string { - if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) { - return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go deleted file mode 100644 index dccfec68..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go +++ /dev/null @@ -1,236 +0,0 @@ -package terraform - -// ResourceProvider is a legacy interface for providers. -// -// This is retained only for compatibility with legacy code. The current -// interface for providers is providers.Interface, in the sibling directory -// named "providers". -type ResourceProvider interface { - /********************************************************************* - * Functions related to the provider - *********************************************************************/ - - // ProviderSchema returns the config schema for the main provider - // configuration, as would appear in a "provider" block in the - // configuration files. - // - // Currently not all providers support schema. Callers must therefore - // first call Resources and DataSources and ensure that at least one - // resource or data source has the SchemaAvailable flag set. - GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error) - - // Input was used prior to v0.12 to ask the provider to prompt the user - // for input to complete the configuration. - // - // From v0.12 onwards this method is never called because Terraform Core - // is able to handle the necessary input logic itself based on the - // schema returned from GetSchema. - Input(UIInput, *ResourceConfig) (*ResourceConfig, error) - - // Validate is called once at the beginning with the raw configuration - // (no interpolation done) and can return a list of warnings and/or - // errors. - // - // This is called once with the provider configuration only. It may not - // be called at all if no provider configuration is given. - // - // This should not assume that any values of the configurations are valid. - // The primary use case of this call is to check that required keys are - // set. - Validate(*ResourceConfig) ([]string, []error) - - // Configure configures the provider itself with the configuration - // given. This is useful for setting things like access keys. - // - // This won't be called at all if no provider configuration is given. - // - // Configure returns an error if it occurred. - Configure(*ResourceConfig) error - - // Resources returns all the available resource types that this provider - // knows how to manage. - Resources() []ResourceType - - // Stop is called when the provider should halt any in-flight actions. - // - // This can be used to make a nicer Ctrl-C experience for Terraform. - // Even if this isn't implemented to do anything (just returns nil), - // Terraform will still cleanly stop after the currently executing - // graph node is complete. However, this API can be used to make more - // efficient halts. - // - // Stop doesn't have to and shouldn't block waiting for in-flight actions - // to complete. 
It should take any action it wants and return immediately - // acknowledging it has received the stop request. Terraform core will - // automatically not make any further API calls to the provider soon - // after Stop is called (technically exactly once the currently executing - // graph nodes are complete). - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error - - /********************************************************************* - * Functions related to individual resources - *********************************************************************/ - - // ValidateResource is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per resource. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - ValidateResource(string, *ResourceConfig) ([]string, []error) - - // Apply applies a diff to a specific resource and returns the new - // resource state along with an error. - // - // If the resource state given has an empty ID, then a new resource - // is expected to be created. - Apply( - *InstanceInfo, - *InstanceState, - *InstanceDiff) (*InstanceState, error) - - // Diff diffs a resource versus a desired state and returns - // a diff. - Diff( - *InstanceInfo, - *InstanceState, - *ResourceConfig) (*InstanceDiff, error) - - // Refresh refreshes a resource and updates all of its attributes - // with the latest information. - Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error) - - /********************************************************************* - * Functions related to importing - *********************************************************************/ - - // ImportState requests that the given resource be imported. - // - // The returned InstanceState only requires ID be set. Importing - // will always call Refresh after the state to complete it. - // - // IMPORTANT: InstanceState doesn't have the resource type attached - // to it. A type must be specified on the state via the Ephemeral - // field on the state. - // - // This function can return multiple states. Normally, an import - // will map 1:1 to a physical resource. However, some resources map - // to multiple. For example, an AWS security group may contain many rules. - // Each rule is represented by a separate resource in Terraform, - // therefore multiple states are returned. - ImportState(*InstanceInfo, string) ([]*InstanceState, error) - - /********************************************************************* - * Functions related to data resources - *********************************************************************/ - - // ValidateDataSource is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per data source instance. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. 
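// A provider might satisfy this with a simple required-key check over the
// raw configuration. An illustrative sketch (the provider type and the
// "name" key are hypothetical, not part of this interface):
//
//    func (p *exampleProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) {
//        var errs []error
//        if _, ok := c.Get("name"); !ok {
//            errs = append(errs, fmt.Errorf("%s: the required key \"name\" is not set", t))
//        }
//        return nil, errs
//    }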
- ValidateDataSource(string, *ResourceConfig) ([]string, []error) - - // DataSources returns all of the available data sources that this - // provider implements. - DataSources() []DataSource - - // ReadDataDiff produces a diff that represents the state that will - // be produced when the given data source is read using a later call - // to ReadDataApply. - ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) - - // ReadDataApply initializes a data instance using the configuration - // in a diff produced by ReadDataDiff. - ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) -} - -// ResourceProviderCloser is an interface that providers that can close -// connections that aren't needed anymore must implement. -type ResourceProviderCloser interface { - Close() error -} - -// ResourceType is a type of resource that a resource provider can manage. -type ResourceType struct { - Name string // Name of the resource, example "instance" (no provider prefix) - Importable bool // Whether this resource supports importing - - // SchemaAvailable is set if the provider supports the ProviderSchema, - // ResourceTypeSchema and DataSourceSchema methods. Although it is - // included on each resource type, it's actually a provider-wide setting - // that's smuggled here only because that avoids a breaking change to - // the plugin protocol. - SchemaAvailable bool -} - -// DataSource is a data source that a resource provider implements. -type DataSource struct { - Name string - - // SchemaAvailable is set if the provider supports the ProviderSchema, - // ResourceTypeSchema and DataSourceSchema methods. Although it is - // included on each resource type, it's actually a provider-wide setting - // that's smuggled here only because that avoids a breaking change to - // the plugin protocol. - SchemaAvailable bool -} - -// ResourceProviderFactory is a function type that creates a new instance -// of a resource provider. -type ResourceProviderFactory func() (ResourceProvider, error) - -// ResourceProviderFactoryFixed is a helper that creates a -// ResourceProviderFactory that just returns some fixed provider. -func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory { - return func() (ResourceProvider, error) { - return p, nil - } -} - -func ProviderHasResource(p ResourceProvider, n string) bool { - for _, rt := range p.Resources() { - if rt.Name == n { - return true - } - } - - return false -} - -func ProviderHasDataSource(p ResourceProvider, n string) bool { - for _, rt := range p.DataSources() { - if rt.Name == n { - return true - } - } - - return false -} - -const errPluginInit = ` -Plugin reinitialization required. Please run "terraform init". - -Plugins are external binaries that Terraform uses to access and manipulate -resources. The configuration provided requires plugins which can't be located, -don't satisfy the version constraints, or are otherwise incompatible. - -Terraform automatically discovers provider requirements from your -configuration, including providers used in child modules. To see the -requirements and constraints, run "terraform providers". 
- -%s -` diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go deleted file mode 100644 index 4000e3d2..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go +++ /dev/null @@ -1,315 +0,0 @@ -package terraform - -import ( - "sync" -) - -// MockResourceProvider implements ResourceProvider but mocks out all the -// calls for testing purposes. -type MockResourceProvider struct { - sync.Mutex - - // Anything you want, in case you need to store extra data with the mock. - Meta interface{} - - CloseCalled bool - CloseError error - GetSchemaCalled bool - GetSchemaRequest *ProviderSchemaRequest - GetSchemaReturn *ProviderSchema - GetSchemaReturnError error - InputCalled bool - InputInput UIInput - InputConfig *ResourceConfig - InputReturnConfig *ResourceConfig - InputReturnError error - InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error) - ApplyCalled bool - ApplyInfo *InstanceInfo - ApplyState *InstanceState - ApplyDiff *InstanceDiff - ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error) - ApplyReturn *InstanceState - ApplyReturnError error - ConfigureCalled bool - ConfigureConfig *ResourceConfig - ConfigureFn func(*ResourceConfig) error - ConfigureReturnError error - DiffCalled bool - DiffInfo *InstanceInfo - DiffState *InstanceState - DiffDesired *ResourceConfig - DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error) - DiffReturn *InstanceDiff - DiffReturnError error - RefreshCalled bool - RefreshInfo *InstanceInfo - RefreshState *InstanceState - RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error) - RefreshReturn *InstanceState - RefreshReturnError error - ResourcesCalled bool - ResourcesReturn []ResourceType - ReadDataApplyCalled bool - ReadDataApplyInfo *InstanceInfo - ReadDataApplyDiff *InstanceDiff - ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error) - ReadDataApplyReturn *InstanceState - ReadDataApplyReturnError error - ReadDataDiffCalled bool - ReadDataDiffInfo *InstanceInfo - ReadDataDiffDesired *ResourceConfig - ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) - ReadDataDiffReturn *InstanceDiff - ReadDataDiffReturnError error - StopCalled bool - StopFn func() error - StopReturnError error - DataSourcesCalled bool - DataSourcesReturn []DataSource - ValidateCalled bool - ValidateConfig *ResourceConfig - ValidateFn func(*ResourceConfig) ([]string, []error) - ValidateReturnWarns []string - ValidateReturnErrors []error - ValidateResourceFn func(string, *ResourceConfig) ([]string, []error) - ValidateResourceCalled bool - ValidateResourceType string - ValidateResourceConfig *ResourceConfig - ValidateResourceReturnWarns []string - ValidateResourceReturnErrors []error - ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error) - ValidateDataSourceCalled bool - ValidateDataSourceType string - ValidateDataSourceConfig *ResourceConfig - ValidateDataSourceReturnWarns []string - ValidateDataSourceReturnErrors []error - - ImportStateCalled bool - ImportStateInfo *InstanceInfo - ImportStateID string - ImportStateReturn []*InstanceState - ImportStateReturnError error - ImportStateFn func(*InstanceInfo, string) ([]*InstanceState, error) -} - -func (p *MockResourceProvider) Close() error { - p.CloseCalled = true - return p.CloseError -} - -func (p *MockResourceProvider) GetSchema(req *ProviderSchemaRequest) 
(*ProviderSchema, error) { - p.Lock() - defer p.Unlock() - - p.GetSchemaCalled = true - p.GetSchemaRequest = req - return p.GetSchemaReturn, p.GetSchemaReturnError -} - -func (p *MockResourceProvider) Input( - input UIInput, c *ResourceConfig) (*ResourceConfig, error) { - p.Lock() - defer p.Unlock() - p.InputCalled = true - p.InputInput = input - p.InputConfig = c - if p.InputFn != nil { - return p.InputFn(input, c) - } - return p.InputReturnConfig, p.InputReturnError -} - -func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateCalled = true - p.ValidateConfig = c - if p.ValidateFn != nil { - return p.ValidateFn(c) - } - return p.ValidateReturnWarns, p.ValidateReturnErrors -} - -func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateResourceCalled = true - p.ValidateResourceType = t - p.ValidateResourceConfig = c - - if p.ValidateResourceFn != nil { - return p.ValidateResourceFn(t, c) - } - - return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors -} - -func (p *MockResourceProvider) Configure(c *ResourceConfig) error { - p.Lock() - defer p.Unlock() - - p.ConfigureCalled = true - p.ConfigureConfig = c - - if p.ConfigureFn != nil { - return p.ConfigureFn(c) - } - - return p.ConfigureReturnError -} - -func (p *MockResourceProvider) Stop() error { - p.Lock() - defer p.Unlock() - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopReturnError -} - -func (p *MockResourceProvider) Apply( - info *InstanceInfo, - state *InstanceState, - diff *InstanceDiff) (*InstanceState, error) { - // We only lock while writing data. Reading is fine - p.Lock() - p.ApplyCalled = true - p.ApplyInfo = info - p.ApplyState = state - p.ApplyDiff = diff - p.Unlock() - - if p.ApplyFn != nil { - return p.ApplyFn(info, state, diff) - } - - return p.ApplyReturn.DeepCopy(), p.ApplyReturnError -} - -func (p *MockResourceProvider) Diff( - info *InstanceInfo, - state *InstanceState, - desired *ResourceConfig) (*InstanceDiff, error) { - p.Lock() - defer p.Unlock() - - p.DiffCalled = true - p.DiffInfo = info - p.DiffState = state - p.DiffDesired = desired - - if p.DiffFn != nil { - return p.DiffFn(info, state, desired) - } - - return p.DiffReturn.DeepCopy(), p.DiffReturnError -} - -func (p *MockResourceProvider) Refresh( - info *InstanceInfo, - s *InstanceState) (*InstanceState, error) { - p.Lock() - defer p.Unlock() - - p.RefreshCalled = true - p.RefreshInfo = info - p.RefreshState = s - - if p.RefreshFn != nil { - return p.RefreshFn(info, s) - } - - return p.RefreshReturn.DeepCopy(), p.RefreshReturnError -} - -func (p *MockResourceProvider) Resources() []ResourceType { - p.Lock() - defer p.Unlock() - - p.ResourcesCalled = true - return p.ResourcesReturn -} - -func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) { - p.Lock() - defer p.Unlock() - - p.ImportStateCalled = true - p.ImportStateInfo = info - p.ImportStateID = id - if p.ImportStateFn != nil { - return p.ImportStateFn(info, id) - } - - var result []*InstanceState - if p.ImportStateReturn != nil { - result = make([]*InstanceState, len(p.ImportStateReturn)) - for i, v := range p.ImportStateReturn { - result[i] = v.DeepCopy() - } - } - - return result, p.ImportStateReturnError -} - -func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - 
p.ValidateDataSourceCalled = true - p.ValidateDataSourceType = t - p.ValidateDataSourceConfig = c - - if p.ValidateDataSourceFn != nil { - return p.ValidateDataSourceFn(t, c) - } - - return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors -} - -func (p *MockResourceProvider) ReadDataDiff( - info *InstanceInfo, - desired *ResourceConfig) (*InstanceDiff, error) { - p.Lock() - defer p.Unlock() - - p.ReadDataDiffCalled = true - p.ReadDataDiffInfo = info - p.ReadDataDiffDesired = desired - if p.ReadDataDiffFn != nil { - return p.ReadDataDiffFn(info, desired) - } - - return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError -} - -func (p *MockResourceProvider) ReadDataApply( - info *InstanceInfo, - d *InstanceDiff) (*InstanceState, error) { - p.Lock() - defer p.Unlock() - - p.ReadDataApplyCalled = true - p.ReadDataApplyInfo = info - p.ReadDataApplyDiff = d - - if p.ReadDataApplyFn != nil { - return p.ReadDataApplyFn(info, d) - } - - return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError -} - -func (p *MockResourceProvider) DataSources() []DataSource { - p.Lock() - defer p.Unlock() - - p.DataSourcesCalled = true - return p.DataSourcesReturn -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go deleted file mode 100644 index 2743dd7e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go +++ /dev/null @@ -1,70 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/provisioners" -) - -// ResourceProvisioner is an interface that must be implemented by any -// resource provisioner: the thing that initializes resources in -// a Terraform configuration. -type ResourceProvisioner interface { - // GetConfigSchema returns the schema for the provisioner type's main - // configuration block. This is called prior to Validate to enable some - // basic structural validation to be performed automatically and to allow - // the configuration to be properly extracted from potentially-ambiguous - // configuration file formats. - GetConfigSchema() (*configschema.Block, error) - - // Validate is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per resource. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - Validate(*ResourceConfig) ([]string, []error) - - // Apply runs the provisioner on a specific resource and returns the new - // resource state along with an error. Instead of a diff, the ResourceConfig - // is provided since provisioners only run after a resource has been - // newly created. - Apply(UIOutput, *InstanceState, *ResourceConfig) error - - // Stop is called when the provisioner should halt any in-flight actions. - // - // This can be used to make a nicer Ctrl-C experience for Terraform. - // Even if this isn't implemented to do anything (just returns nil), - // Terraform will still cleanly stop after the currently executing - // graph node is complete. However, this API can be used to make more - // efficient halts. 
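// One common non-blocking implementation closes a channel that in-flight
// operations poll. An illustrative sketch (the provisioner type is
// hypothetical, not part of this interface's contract):
//
//    type exampleProvisioner struct {
//        stopCh   chan struct{}
//        stopOnce sync.Once
//    }
//
//    func (p *exampleProvisioner) Stop() error {
//        // Signal in-flight Apply calls to wind down, then return immediately.
//        p.stopOnce.Do(func() { close(p.stopCh) })
//        return nil
//    }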
- // - // Stop doesn't have to and shouldn't block waiting for in-flight actions - // to complete. It should take any action it wants and return immediately - // acknowledging it has received the stop request. Terraform core will - // automatically not make any further API calls to the provider soon - // after Stop is called (technically exactly once the currently executing - // graph nodes are complete). - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error -} - -// ResourceProvisionerCloser is an interface that provisioners that can close -// connections that aren't needed anymore must implement. -type ResourceProvisionerCloser interface { - Close() error -} - -// ResourceProvisionerFactory is a function type that creates a new instance -// of a resource provisioner. -type ResourceProvisionerFactory func() (ResourceProvisioner, error) - -// ProvisionerFactory is a function type that creates a new instance -// of a provisioners.Interface. -type ProvisionerFactory = provisioners.Factory diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go deleted file mode 100644 index 7b88cf73..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go +++ /dev/null @@ -1,87 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/hashicorp/terraform/configs/configschema" -) - -// MockResourceProvisioner implements ResourceProvisioner but mocks out all the -// calls for testing purposes. -type MockResourceProvisioner struct { - sync.Mutex - // Anything you want, in case you need to store extra data with the mock. 
- Meta interface{}
-
- GetConfigSchemaCalled bool
- GetConfigSchemaReturnSchema *configschema.Block
- GetConfigSchemaReturnError error
-
- ApplyCalled bool
- ApplyOutput UIOutput
- ApplyState *InstanceState
- ApplyConfig *ResourceConfig
- ApplyFn func(*InstanceState, *ResourceConfig) error
- ApplyReturnError error
-
- ValidateCalled bool
- ValidateConfig *ResourceConfig
- ValidateFn func(c *ResourceConfig) ([]string, []error)
- ValidateReturnWarns []string
- ValidateReturnErrors []error
-
- StopCalled bool
- StopFn func() error
- StopReturnError error
-}
-
-var _ ResourceProvisioner = (*MockResourceProvisioner)(nil)
-
-func (p *MockResourceProvisioner) GetConfigSchema() (*configschema.Block, error) {
- p.GetConfigSchemaCalled = true
- return p.GetConfigSchemaReturnSchema, p.GetConfigSchemaReturnError
-}
-
-func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) {
- p.Lock()
- defer p.Unlock()
-
- p.ValidateCalled = true
- p.ValidateConfig = c
- if p.ValidateFn != nil {
- return p.ValidateFn(c)
- }
- return p.ValidateReturnWarns, p.ValidateReturnErrors
-}
-
-func (p *MockResourceProvisioner) Apply(
- output UIOutput,
- state *InstanceState,
- c *ResourceConfig) error {
- p.Lock()
-
- p.ApplyCalled = true
- p.ApplyOutput = output
- p.ApplyState = state
- p.ApplyConfig = c
- if p.ApplyFn != nil {
- fn := p.ApplyFn
- p.Unlock()
- return fn(state, c)
- }
-
- defer p.Unlock()
- return p.ApplyReturnError
-}
-
-func (p *MockResourceProvisioner) Stop() error {
- p.Lock()
- defer p.Unlock()
-
- p.StopCalled = true
- if p.StopFn != nil {
- return p.StopFn()
- }
-
- return p.StopReturnError
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/schemas.go b/vendor/github.com/hashicorp/terraform/terraform/schemas.go
deleted file mode 100644
index 15f6d5e7..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/schemas.go
+++ /dev/null
@@ -1,285 +0,0 @@
-package terraform
-
-import (
- "fmt"
- "log"
-
- "github.com/hashicorp/terraform/addrs"
- "github.com/hashicorp/terraform/configs"
- "github.com/hashicorp/terraform/configs/configschema"
- "github.com/hashicorp/terraform/providers"
- "github.com/hashicorp/terraform/states"
- "github.com/hashicorp/terraform/tfdiags"
-)
-
-// Schemas is a container for various kinds of schema that Terraform needs
-// during processing.
-type Schemas struct {
- Providers map[addrs.Provider]*ProviderSchema
- Provisioners map[string]*configschema.Block
-}
-
-// ProviderSchema returns the entire ProviderSchema object that was produced
-// by the plugin for the given provider, or nil if no such schema is available.
-//
-// It's usually better to use the more precise methods offered by type
-// Schemas to handle this detail automatically.
-func (ss *Schemas) ProviderSchema(provider addrs.Provider) *ProviderSchema {
- if ss.Providers == nil {
- return nil
- }
- return ss.Providers[provider]
-}
-
-// ProviderConfig returns the schema for the provider configuration of the
-// given provider type, or nil if no such schema is available.
-func (ss *Schemas) ProviderConfig(provider addrs.Provider) *configschema.Block {
- ps := ss.ProviderSchema(provider)
- if ps == nil {
- return nil
- }
- return ps.Provider
-}
-
-// ResourceTypeConfig returns the schema for the configuration of a given
-// resource type belonging to a given provider type, or nil if no such
-// schema is available.
-//
-// In many cases the provider type is inferrable from the resource type name,
-// but this is not always true because users can override the provider for
-// a resource using the "provider" meta-argument. Therefore it's important to
-// always pass the correct provider name, even though in many cases it feels
-// redundant.
-func (ss *Schemas) ResourceTypeConfig(provider addrs.Provider, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) {
- ps := ss.ProviderSchema(provider)
- if ps == nil || ps.ResourceTypes == nil {
- return nil, 0
- }
- return ps.SchemaForResourceType(resourceMode, resourceType)
-}
-
-// ProvisionerConfig returns the schema for the configuration of a given
-// provisioner, or nil if no such schema is available.
-func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block {
- return ss.Provisioners[name]
-}
-
-// LoadSchemas searches the given configuration and state (either of which
-// may be nil) for constructs that have an associated schema, requests the
-// necessary schemas from the given component factory (which must _not_ be nil),
-// and returns a single object representing all of the necessary schemas.
-//
-// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing
-// errors across multiple separate objects. Errors here will usually indicate
-// either misbehavior on the part of one of the providers or of the provider
-// protocol itself. When returned with errors, the returned schemas object is
-// still valid but may be incomplete.
-func LoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) (*Schemas, error) {
- schemas := &Schemas{
- Providers: map[addrs.Provider]*ProviderSchema{},
- Provisioners: map[string]*configschema.Block{},
- }
- var diags tfdiags.Diagnostics
-
- newDiags := loadProviderSchemas(schemas.Providers, config, state, components)
- diags = diags.Append(newDiags)
- newDiags = loadProvisionerSchemas(schemas.Provisioners, config, components)
- diags = diags.Append(newDiags)
-
- return schemas, diags.Err()
-}
-
-func loadProviderSchemas(schemas map[addrs.Provider]*ProviderSchema, config *configs.Config, state *states.State, components contextComponentFactory) tfdiags.Diagnostics {
- var diags tfdiags.Diagnostics
-
- ensure := func(fqn addrs.Provider) {
- name := fqn.String()
-
- if _, exists := schemas[fqn]; exists {
- return
- }
-
- log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", name)
- provider, err := components.ResourceProvider(fqn)
- if err != nil {
- // We'll put a stub in the map so we won't re-attempt this on
- // future calls.
- schemas[fqn] = &ProviderSchema{}
- diags = diags.Append(
- fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", name, err),
- )
- return
- }
- defer func() {
- provider.Close()
- }()
-
- resp := provider.GetSchema()
- if resp.Diagnostics.HasErrors() {
- // We'll put a stub in the map so we won't re-attempt this on
- // future calls.
- schemas[fqn] = &ProviderSchema{} - diags = diags.Append( - fmt.Errorf("Failed to retrieve schema from provider %q: %s", name, resp.Diagnostics.Err()), - ) - return - } - - s := &ProviderSchema{ - Provider: resp.Provider.Block, - ResourceTypes: make(map[string]*configschema.Block), - DataSources: make(map[string]*configschema.Block), - - ResourceTypeSchemaVersions: make(map[string]uint64), - } - - if resp.Provider.Version < 0 { - // We're not using the version numbers here yet, but we'll check - // for validity anyway in case we start using them in future. - diags = diags.Append( - fmt.Errorf("invalid negative schema version provider configuration for provider %q", name), - ) - } - - for t, r := range resp.ResourceTypes { - s.ResourceTypes[t] = r.Block - s.ResourceTypeSchemaVersions[t] = uint64(r.Version) - if r.Version < 0 { - diags = diags.Append( - fmt.Errorf("invalid negative schema version for resource type %s in provider %q", t, name), - ) - } - } - - for t, d := range resp.DataSources { - s.DataSources[t] = d.Block - if d.Version < 0 { - // We're not using the version numbers here yet, but we'll check - // for validity anyway in case we start using them in future. - diags = diags.Append( - fmt.Errorf("invalid negative schema version for data source %s in provider %q", t, name), - ) - } - } - - schemas[fqn] = s - - if resp.ProviderMeta.Block != nil { - s.ProviderMeta = resp.ProviderMeta.Block - } - } - - if config != nil { - for _, fqn := range config.ProviderTypes() { - ensure(fqn) - } - } - - if state != nil { - needed := providers.AddressedTypesAbs(state.ProviderAddrs()) - for _, typeAddr := range needed { - ensure(typeAddr) - } - } - - return diags -} - -func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, components contextComponentFactory) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - ensure := func(name string) { - if _, exists := schemas[name]; exists { - return - } - - log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name) - provisioner, err := components.ResourceProvisioner(name) - if err != nil { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[name] = &configschema.Block{} - diags = diags.Append( - fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err), - ) - return - } - defer func() { - if closer, ok := provisioner.(ResourceProvisionerCloser); ok { - closer.Close() - } - }() - - resp := provisioner.GetSchema() - if resp.Diagnostics.HasErrors() { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[name] = &configschema.Block{} - diags = diags.Append( - fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()), - ) - return - } - - schemas[name] = resp.Provisioner - } - - if config != nil { - for _, rc := range config.Module.ManagedResources { - for _, pc := range rc.Managed.Provisioners { - ensure(pc.Type) - } - } - - // Must also visit our child modules, recursively. - for _, cc := range config.Children { - childDiags := loadProvisionerSchemas(schemas, cc, components) - diags = diags.Append(childDiags) - } - } - - return diags -} - -// ProviderSchema represents the schema for a provider's own configuration -// and the configuration for some or all of its resources and data sources. -// -// The completeness of this structure depends on how it was constructed. 
-// When constructed for a configuration, it will generally include only -// resource types and data sources used by that configuration. -type ProviderSchema struct { - Provider *configschema.Block - ProviderMeta *configschema.Block - ResourceTypes map[string]*configschema.Block - DataSources map[string]*configschema.Block - - ResourceTypeSchemaVersions map[string]uint64 -} - -// SchemaForResourceType attempts to find a schema for the given mode and type. -// Returns nil if no such schema is available. -func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { - switch mode { - case addrs.ManagedResourceMode: - return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName] - case addrs.DataResourceMode: - // Data resources don't have schema versions right now, since state is discarded for each refresh - return ps.DataSources[typeName], 0 - default: - // Shouldn't happen, because the above cases are comprehensive. - return nil, 0 - } -} - -// SchemaForResourceAddr attempts to find a schema for the mode and type from -// the given resource address. Returns nil if no such schema is available. -func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { - return ps.SchemaForResourceType(addr.Mode, addr.Type) -} - -// ProviderSchemaRequest is used to describe to a ResourceProvider which -// aspects of schema are required, when calling the GetSchema method. -type ProviderSchemaRequest struct { - ResourceTypes []string - DataSources []string -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go deleted file mode 100644 index 95c1e851..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state.go +++ /dev/null @@ -1,2255 +0,0 @@ -package terraform - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" - - "github.com/hashicorp/errwrap" - multierror "github.com/hashicorp/go-multierror" - uuid "github.com/hashicorp/go-uuid" - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/tfdiags" - tfversion "github.com/hashicorp/terraform/version" - "github.com/mitchellh/copystructure" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" -) - -const ( - // StateVersion is the current version for our state file - StateVersion = 3 -) - -// rootModulePath is the path of the root module -var rootModulePath = []string{"root"} - -// normalizeModulePath transforms a legacy module path (which may or may not -// have a redundant "root" label at the start of it) into an -// addrs.ModuleInstance representing the same module. -// -// For legacy reasons, different parts of Terraform disagree about whether the -// root module has the path []string{} or []string{"root"}, and so this -// function accepts both and trims off the "root". An implication of this is -// that it's not possible to actually have a module call in the root module -// that is itself named "root", since that would be ambiguous. 
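// Illustrative examples of the normalization just described, matching the
// behavior of the function body that follows:
//
//    normalizeModulePath(nil)                     // the root module instance
//    normalizeModulePath([]string{"root"})        // the root module instance
//    normalizeModulePath([]string{"root", "foo"}) // module.foo
//    normalizeModulePath([]string{"foo", "bar"})  // module.foo.module.bar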
-//
-// normalizeModulePath takes a raw module path and returns a path that
-// has the rootModulePath prepended to it. If I could go back in time I
-// would've never had a rootModulePath (empty path would be root). We can
-// still fix this, but that's a big refactor that doesn't make sense for
-// this branch. Instead, this function normalizes paths.
-func normalizeModulePath(p []string) addrs.ModuleInstance {
- // FIXME: Remove this once everyone is using addrs.ModuleInstance.
-
- if len(p) > 0 && p[0] == "root" {
- p = p[1:]
- }
-
- ret := make(addrs.ModuleInstance, len(p))
- for i, name := range p {
- // For now we don't actually support modules with multiple instances
- // identified by keys, so we just treat every path element as a
- // step with no key.
- ret[i] = addrs.ModuleInstanceStep{
- Name: name,
- }
- }
- return ret
-}
-
-// State keeps track of a snapshot state-of-the-world that Terraform
-// can use to keep track of what real world resources it is actually
-// managing.
-type State struct {
- // Version is the state file protocol version.
- Version int `json:"version"`
-
- // TFVersion is the version of Terraform that wrote this state.
- TFVersion string `json:"terraform_version,omitempty"`
-
- // Serial is incremented on any operation that modifies
- // the State file. It is used to detect potentially conflicting
- // updates.
- Serial int64 `json:"serial"`
-
- // Lineage is set when a new, blank state is created and then
- // never updated. This allows us to determine whether the serials
- // of two states can be meaningfully compared.
- // Apart from the guarantee that collisions between two lineages
- // are very unlikely, this value is opaque and external callers
- // should only compare lineage strings byte-for-byte for equality.
- Lineage string `json:"lineage"`
-
- // Remote is used to track the metadata required to
- // pull and push state files from a remote storage endpoint.
- Remote *RemoteState `json:"remote,omitempty"`
-
- // Backend tracks the configuration for the backend in use with
- // this state. This is used to track any changes in the backend
- // configuration.
- Backend *BackendState `json:"backend,omitempty"`
-
- // Modules contains all the modules in a breadth-first order
- Modules []*ModuleState `json:"modules"`
-
- mu sync.Mutex
-}
-
-func (s *State) Lock() { s.mu.Lock() }
-func (s *State) Unlock() { s.mu.Unlock() }
-
-// NewState is used to initialize a blank state
-func NewState() *State {
- s := &State{}
- s.init()
- return s
-}
-
-// Children returns the ModuleStates that are direct children of
-// the given path. If the path is "root", for example, then children
-// returned might be "root.child", but not "root.child.grandchild".
-func (s *State) Children(path []string) []*ModuleState {
- s.Lock()
- defer s.Unlock()
- // TODO: test
-
- return s.children(path)
-}
-
-func (s *State) children(path []string) []*ModuleState {
- result := make([]*ModuleState, 0)
- for _, m := range s.Modules {
- if m == nil {
- continue
- }
-
- if len(m.Path) != len(path)+1 {
- continue
- }
- if !reflect.DeepEqual(path, m.Path[:len(path)]) {
- continue
- }
-
- result = append(result, m)
- }
-
- return result
-}
-
-// AddModule adds the module with the given path to the state.
-//
-// This should be the preferred method to add module states since it
-// allows us to optimize lookups later as well as control sorting.
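// An illustrative sketch of typical usage (the state value st and the child
// module name "foo" are hypothetical):
//
//    path := addrs.RootModuleInstance.Child("foo", addrs.NoKey)
//    mod := st.AddModule(path)
//    mod == st.ModuleByPath(path) // true: AddModule returns the stored entry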
-func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState { - s.Lock() - defer s.Unlock() - - return s.addModule(path) -} - -func (s *State) addModule(path addrs.ModuleInstance) *ModuleState { - // check if the module exists first - m := s.moduleByPath(path) - if m != nil { - return m - } - - // Lower the new-style address into a legacy-style address. - // This requires that none of the steps have instance keys, which is - // true for all addresses at the time of implementing this because - // "count" and "for_each" are not yet implemented for modules. - // For the purposes of state, the legacy address format also includes - // a redundant extra prefix element "root". It is important to include - // this because the "prune" method will remove any module that has a - // path length less than one, and other parts of the state code will - // trim off the first element indiscriminately. - legacyPath := make([]string, len(path)+1) - legacyPath[0] = "root" - for i, step := range path { - if step.InstanceKey != addrs.NoKey { - // FIXME: Once the rest of Terraform is ready to use count and - // for_each, remove all of this and just write the addrs.ModuleInstance - // value itself into the ModuleState. - panic("state cannot represent modules with count or for_each keys") - } - - legacyPath[i+1] = step.Name - } - - m = &ModuleState{Path: legacyPath} - m.init() - s.Modules = append(s.Modules, m) - s.sort() - return m -} - -// ModuleByPath is used to lookup the module state for the given path. -// This should be the preferred lookup mechanism as it allows for future -// lookup optimizations. -func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState { - if s == nil { - return nil - } - s.Lock() - defer s.Unlock() - - return s.moduleByPath(path) -} - -func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState { - for _, mod := range s.Modules { - if mod == nil { - continue - } - if mod.Path == nil { - panic("missing module path") - } - modPath := normalizeModulePath(mod.Path) - if modPath.String() == path.String() { - return mod - } - } - return nil -} - -// Empty returns true if the state is empty. -func (s *State) Empty() bool { - if s == nil { - return true - } - s.Lock() - defer s.Unlock() - - return len(s.Modules) == 0 -} - -// HasResources returns true if the state contains any resources. -// -// This is similar to !s.Empty, but returns true also in the case where the -// state has modules but all of them are devoid of resources. -func (s *State) HasResources() bool { - if s.Empty() { - return false - } - - for _, mod := range s.Modules { - if len(mod.Resources) > 0 { - return true - } - } - - return false -} - -// IsRemote returns true if State represents a state that exists and is -// remote. -func (s *State) IsRemote() bool { - if s == nil { - return false - } - s.Lock() - defer s.Unlock() - - if s.Remote == nil { - return false - } - if s.Remote.Type == "" { - return false - } - - return true -} - -// Validate validates the integrity of this state file. -// -// Certain properties of the statefile are expected by Terraform in order -// to behave properly. The core of Terraform will assume that once it -// receives a State structure that it has been validated. This validation -// check should be called to ensure that. -// -// If this returns an error, then the user should be notified. The error -// response will include detailed information on the nature of the error. -func (s *State) Validate() error { - s.Lock() - defer s.Unlock() - - var result error - - // !!!! 
FOR DEVELOPERS !!!!
- //
- // Any errors returned from this Validate function will BLOCK TERRAFORM
- // from loading a state file. Therefore, this should only contain checks
- // that are only resolvable through manual intervention.
- //
- // !!!! FOR DEVELOPERS !!!!
-
- // Make sure there are no duplicate module states. We open a new
- // block here so we can use basic variable names and future validations
- // can do the same.
- {
- found := make(map[string]struct{})
- for _, ms := range s.Modules {
- if ms == nil {
- continue
- }
-
- key := strings.Join(ms.Path, ".")
- if _, ok := found[key]; ok {
- result = multierror.Append(result, fmt.Errorf(
- strings.TrimSpace(stateValidateErrMultiModule), key))
- continue
- }
-
- found[key] = struct{}{}
- }
- }
-
- return result
-}
-
-// Remove removes the item in the state at the given address, returning
-// any errors that may have occurred.
-//
-// If the address references a module state or resource, it will delete
-// all children as well. To check what will be deleted, use a StateFilter
-// first.
-func (s *State) Remove(addr ...string) error {
- s.Lock()
- defer s.Unlock()
-
- // Filter out what we need to delete
- filter := &StateFilter{State: s}
- results, err := filter.Filter(addr...)
- if err != nil {
- return err
- }
-
- // If we have no results, just exit early, we're not going to do anything.
- // While what happens below is fairly fast, this is an important early
- // exit since the prune below might modify the state more and we don't
- // want to modify the state if we don't have to.
- if len(results) == 0 {
- return nil
- }
-
- // Go through each result and grab what we need
- removed := make(map[interface{}]struct{})
- for _, r := range results {
- // Convert the path to our own type
- path := append([]string{"root"}, r.Path...)
-
- // If we removed this already, then ignore
- if _, ok := removed[r.Value]; ok {
- continue
- }
-
- // If we removed the parent already, then ignore
- if r.Parent != nil {
- if _, ok := removed[r.Parent.Value]; ok {
- continue
- }
- }
-
- // Add this to the removed list
- removed[r.Value] = struct{}{}
-
- switch v := r.Value.(type) {
- case *ModuleState:
- s.removeModule(path, v)
- case *ResourceState:
- s.removeResource(path, v)
- case *InstanceState:
- s.removeInstance(path, r.Parent.Value.(*ResourceState), v)
- default:
- return fmt.Errorf("unknown type to delete: %T", r.Value)
- }
- }
-
- // Prune since the removal functions often do the bare minimum to
- // remove a thing and may leave around dangling empty modules, resources,
- // etc. Prune will clean that all up.
- s.prune()
-
- return nil
-}
-
-func (s *State) removeModule(path []string, v *ModuleState) {
- for i, m := range s.Modules {
- if m == v {
- s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil
- return
- }
- }
-}
-
-func (s *State) removeResource(path []string, v *ResourceState) {
- // Get the module this resource lives in. If it doesn't exist, we're done.
- mod := s.moduleByPath(normalizeModulePath(path))
- if mod == nil {
- return
- }
-
- // Find this resource. This is an O(N) lookup; if we had the key
- // it could be O(1), but even with thousands of resources this shouldn't
- // matter right now. We can easily improve performance here when the time comes.
- for k, r := range mod.Resources { - if r == v { - // Found it - delete(mod.Resources, k) - return - } - } -} - -func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) { - // Go through the resource and find the instance that matches this - // (if any) and remove it. - - // Check primary - if r.Primary == v { - r.Primary = nil - return - } - - // Check lists - lists := [][]*InstanceState{r.Deposed} - for _, is := range lists { - for i, instance := range is { - if instance == v { - // Found it, remove it - is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil - - // Done - return - } - } - } -} - -// RootModule returns the ModuleState for the root module -func (s *State) RootModule() *ModuleState { - root := s.ModuleByPath(addrs.RootModuleInstance) - if root == nil { - panic("missing root module") - } - return root -} - -// Equal tests if one state is equal to another. -func (s *State) Equal(other *State) bool { - // If one is nil, we do a direct check - if s == nil || other == nil { - return s == other - } - - s.Lock() - defer s.Unlock() - return s.equal(other) -} - -func (s *State) equal(other *State) bool { - if s == nil || other == nil { - return s == other - } - - // If the versions are different, they're certainly not equal - if s.Version != other.Version { - return false - } - - // If any of the modules are not equal, then this state isn't equal - if len(s.Modules) != len(other.Modules) { - return false - } - for _, m := range s.Modules { - // This isn't very optimal currently but works. - otherM := other.moduleByPath(normalizeModulePath(m.Path)) - if otherM == nil { - return false - } - - // If they're not equal, then we're not equal! - if !m.Equal(otherM) { - return false - } - } - - return true -} - -// MarshalEqual is similar to Equal but provides a stronger definition of -// "equal", where two states are equal if and only if their serialized form -// is byte-for-byte identical. -// -// This is primarily useful for callers that are trying to save snapshots -// of state to persistent storage, allowing them to detect when a new -// snapshot must be taken. -// -// Note that the serial number and lineage are included in the serialized form, -// so it's the caller's responsibility to properly manage these attributes -// so that this method is only called on two states that have the same -// serial and lineage, unless detecting such differences is desired. -func (s *State) MarshalEqual(other *State) bool { - if s == nil && other == nil { - return true - } else if s == nil || other == nil { - return false - } - - recvBuf := &bytes.Buffer{} - otherBuf := &bytes.Buffer{} - - err := WriteState(s, recvBuf) - if err != nil { - // should never happen, since we're writing to a buffer - panic(err) - } - - err = WriteState(other, otherBuf) - if err != nil { - // should never happen, since we're writing to a buffer - panic(err) - } - - return bytes.Equal(recvBuf.Bytes(), otherBuf.Bytes()) -} - -type StateAgeComparison int - -const ( - StateAgeEqual StateAgeComparison = 0 - StateAgeReceiverNewer StateAgeComparison = 1 - StateAgeReceiverOlder StateAgeComparison = -1 -) - -// CompareAges compares one state with another for which is "older". -// -// This is a simple check using the state's serial, and is thus only as -// reliable as the serial itself. In the normal case, only one state -// exists for a given combination of lineage/serial, but Terraform -// does not guarantee this and so the result of this method should be -// used with care. 
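// A typical guard when deciding whether to overwrite a persisted snapshot,
// as an illustrative sketch (the local and remote variable names are
// hypothetical):
//
//    cmp, err := local.CompareAges(remote)
//    if err == nil && cmp == StateAgeReceiverOlder {
//        // The remote snapshot has a newer serial; don't clobber it.
//    }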
-// -// Returns an integer that is negative if the receiver is older than -// the argument, positive if the converse, and zero if they are equal. -// An error is returned if the two states are not of the same lineage, -// in which case the integer returned has no meaning. -func (s *State) CompareAges(other *State) (StateAgeComparison, error) { - // nil states are "older" than actual states - switch { - case s != nil && other == nil: - return StateAgeReceiverNewer, nil - case s == nil && other != nil: - return StateAgeReceiverOlder, nil - case s == nil && other == nil: - return StateAgeEqual, nil - } - - if !s.SameLineage(other) { - return StateAgeEqual, fmt.Errorf( - "can't compare two states of differing lineage", - ) - } - - s.Lock() - defer s.Unlock() - - switch { - case s.Serial < other.Serial: - return StateAgeReceiverOlder, nil - case s.Serial > other.Serial: - return StateAgeReceiverNewer, nil - default: - return StateAgeEqual, nil - } -} - -// SameLineage returns true only if the state given in argument belongs -// to the same "lineage" of states as the receiver. -func (s *State) SameLineage(other *State) bool { - s.Lock() - defer s.Unlock() - - // If one of the states has no lineage then it is assumed to predate - // this concept, and so we'll accept it as belonging to any lineage - // so that a lineage string can be assigned to newer versions - // without breaking compatibility with older versions. - if s.Lineage == "" || other.Lineage == "" { - return true - } - - return s.Lineage == other.Lineage -} - -// DeepCopy performs a deep copy of the state structure and returns -// a new structure. -func (s *State) DeepCopy() *State { - if s == nil { - return nil - } - - copy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(err) - } - - return copy.(*State) -} - -// FromFutureTerraform checks if this state was written by a Terraform -// version from the future. 
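To make the ordering rule above concrete, here is a small, self-contained re-implementation of the serial/lineage comparison; the state struct is an illustrative stand-in, and only Serial and Lineage matter for this check. An empty lineage is treated as matching any lineage, as SameLineage above does:

package main

import (
	"errors"
	"fmt"
)

type state struct {
	Lineage string
	Serial  int64
}

// compareAges orders states by Serial; the comparison is only meaningful
// within a single lineage.
func compareAges(a, b state) (int, error) {
	if a.Lineage != "" && b.Lineage != "" && a.Lineage != b.Lineage {
		return 0, errors.New("can't compare two states of differing lineage")
	}
	switch {
	case a.Serial < b.Serial:
		return -1, nil // receiver is older
	case a.Serial > b.Serial:
		return 1, nil // receiver is newer
	default:
		return 0, nil
	}
}

func main() {
	older := state{Lineage: "l1", Serial: 3}
	newer := state{Lineage: "l1", Serial: 5}
	cmp, err := compareAges(older, newer)
	fmt.Println(cmp, err) // -1 <nil>
}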
-func (s *State) FromFutureTerraform() bool { - s.Lock() - defer s.Unlock() - - // No TF version means it is certainly from the past - if s.TFVersion == "" { - return false - } - - v := version.Must(version.NewVersion(s.TFVersion)) - return tfversion.SemVer.LessThan(v) -} - -func (s *State) Init() { - s.Lock() - defer s.Unlock() - s.init() -} - -func (s *State) init() { - if s.Version == 0 { - s.Version = StateVersion - } - - if s.moduleByPath(addrs.RootModuleInstance) == nil { - s.addModule(addrs.RootModuleInstance) - } - s.ensureHasLineage() - - for _, mod := range s.Modules { - if mod != nil { - mod.init() - } - } - - if s.Remote != nil { - s.Remote.init() - } - -} - -func (s *State) EnsureHasLineage() { - s.Lock() - defer s.Unlock() - - s.ensureHasLineage() -} - -func (s *State) ensureHasLineage() { - if s.Lineage == "" { - lineage, err := uuid.GenerateUUID() - if err != nil { - panic(fmt.Errorf("Failed to generate lineage: %v", err)) - } - s.Lineage = lineage - log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) - } else { - log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) - } -} - -// AddModuleState insert this module state and override any existing ModuleState -func (s *State) AddModuleState(mod *ModuleState) { - mod.init() - s.Lock() - defer s.Unlock() - - s.addModuleState(mod) -} - -func (s *State) addModuleState(mod *ModuleState) { - for i, m := range s.Modules { - if reflect.DeepEqual(m.Path, mod.Path) { - s.Modules[i] = mod - return - } - } - - s.Modules = append(s.Modules, mod) - s.sort() -} - -// prune is used to remove any resources that are no longer required -func (s *State) prune() { - if s == nil { - return - } - - // Filter out empty modules. - // A module is always assumed to have a path, and it's length isn't always - // bounds checked later on. Modules may be "emptied" during destroy, but we - // never want to store those in the state. - for i := 0; i < len(s.Modules); i++ { - if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 { - s.Modules = append(s.Modules[:i], s.Modules[i+1:]...) - i-- - } - } - - for _, mod := range s.Modules { - mod.prune() - } - if s.Remote != nil && s.Remote.Empty() { - s.Remote = nil - } -} - -// sort sorts the modules -func (s *State) sort() { - sort.Sort(moduleStateSort(s.Modules)) - - // Allow modules to be sorted - for _, m := range s.Modules { - if m != nil { - m.sort() - } - } -} - -func (s *State) String() string { - if s == nil { - return "" - } - s.Lock() - defer s.Unlock() - - var buf bytes.Buffer - for _, m := range s.Modules { - mStr := m.String() - - // If we're the root module, we just write the output directly. - if reflect.DeepEqual(m.Path, rootModulePath) { - buf.WriteString(mStr + "\n") - continue - } - - buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], "."))) - - s := bufio.NewScanner(strings.NewReader(mStr)) - for s.Scan() { - text := s.Text() - if text != "" { - text = " " + text - } - - buf.WriteString(fmt.Sprintf("%s\n", text)) - } - } - - return strings.TrimSpace(buf.String()) -} - -// BackendState stores the configuration to connect to a remote backend. -type BackendState struct { - Type string `json:"type"` // Backend type - ConfigRaw json.RawMessage `json:"config"` // Backend raw config - Hash uint64 `json:"hash"` // Hash of portion of configuration from config files -} - -// Empty returns true if BackendState has no state. 
-func (s *BackendState) Empty() bool { - return s == nil || s.Type == "" -} - -// Config decodes the type-specific configuration object using the provided -// schema and returns the result as a cty.Value. -// -// An error is returned if the stored configuration does not conform to the -// given schema. -func (s *BackendState) Config(schema *configschema.Block) (cty.Value, error) { - ty := schema.ImpliedType() - if s == nil { - return cty.NullVal(ty), nil - } - return ctyjson.Unmarshal(s.ConfigRaw, ty) -} - -// SetConfig replaces (in-place) the type-specific configuration object using -// the provided value and associated schema. -// -// An error is returned if the given value does not conform to the implied -// type of the schema. -func (s *BackendState) SetConfig(val cty.Value, schema *configschema.Block) error { - ty := schema.ImpliedType() - buf, err := ctyjson.Marshal(val, ty) - if err != nil { - return err - } - s.ConfigRaw = buf - return nil -} - -// ForPlan produces an alternative representation of the reciever that is -// suitable for storing in a plan. The current workspace must additionally -// be provided, to be stored alongside the backend configuration. -// -// The backend configuration schema is required in order to properly -// encode the backend-specific configuration settings. -func (s *BackendState) ForPlan(schema *configschema.Block, workspaceName string) (*plans.Backend, error) { - if s == nil { - return nil, nil - } - - configVal, err := s.Config(schema) - if err != nil { - return nil, errwrap.Wrapf("failed to decode backend config: {{err}}", err) - } - return plans.NewBackend(s.Type, configVal, schema, workspaceName) -} - -// RemoteState is used to track the information about a remote -// state store that we push/pull state to. -type RemoteState struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` - - mu sync.Mutex -} - -func (s *RemoteState) Lock() { s.mu.Lock() } -func (s *RemoteState) Unlock() { s.mu.Unlock() } - -func (r *RemoteState) init() { - r.Lock() - defer r.Unlock() - - if r.Config == nil { - r.Config = make(map[string]string) - } -} - -func (r *RemoteState) deepcopy() *RemoteState { - r.Lock() - defer r.Unlock() - - confCopy := make(map[string]string, len(r.Config)) - for k, v := range r.Config { - confCopy[k] = v - } - return &RemoteState{ - Type: r.Type, - Config: confCopy, - } -} - -func (r *RemoteState) Empty() bool { - if r == nil { - return true - } - r.Lock() - defer r.Unlock() - - return r.Type == "" -} - -func (r *RemoteState) Equals(other *RemoteState) bool { - r.Lock() - defer r.Unlock() - - if r.Type != other.Type { - return false - } - if len(r.Config) != len(other.Config) { - return false - } - for k, v := range r.Config { - if other.Config[k] != v { - return false - } - } - return true -} - -// OutputState is used to track the state relevant to a single output. -type OutputState struct { - // Sensitive describes whether the output is considered sensitive, - // which may lead to masking the value on screen in some cases. - Sensitive bool `json:"sensitive"` - // Type describes the structure of Value. Valid values are "string", - // "map" and "list" - Type string `json:"type"` - // Value contains the value of the output, in the structure described - // by the Type field. 
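The Config/SetConfig pair above is a JSON round trip through the go-cty codec. A hedged sketch of that round trip, using an illustrative object type in place of a real backend schema's implied type:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	// Stand-in for schema.ImpliedType() on a backend configuration block.
	ty := cty.Object(map[string]cty.Type{
		"bucket": cty.String,
		"region": cty.String,
	})

	val := cty.ObjectVal(map[string]cty.Value{
		"bucket": cty.StringVal("my-state"),
		"region": cty.StringVal("us-east-1"),
	})

	// What SetConfig stores in ConfigRaw.
	raw, err := ctyjson.Marshal(val, ty)
	if err != nil {
		panic(err)
	}

	// What Config returns when given the same type.
	back, err := ctyjson.Unmarshal(raw, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw), back.RawEquals(val)) // {...} true
}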
- Value interface{} `json:"value"`
-
- mu sync.Mutex
-}
-
-func (s *OutputState) Lock() { s.mu.Lock() }
-func (s *OutputState) Unlock() { s.mu.Unlock() }
-
-func (s *OutputState) String() string {
- return fmt.Sprintf("%#v", s.Value)
-}
-
-// Equal compares two OutputState structures for equality. nil values are
-// considered equal.
-func (s *OutputState) Equal(other *OutputState) bool {
- if s == nil && other == nil {
-  return true
- }
-
- if s == nil || other == nil {
-  return false
- }
- s.Lock()
- defer s.Unlock()
-
- if s.Type != other.Type {
-  return false
- }
-
- if s.Sensitive != other.Sensitive {
-  return false
- }
-
- if !reflect.DeepEqual(s.Value, other.Value) {
-  return false
- }
-
- return true
-}
-
-func (s *OutputState) deepcopy() *OutputState {
- if s == nil {
-  return nil
- }
-
- stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
- if err != nil {
-  panic(fmt.Errorf("Error copying output value: %s", err))
- }
-
- return stateCopy.(*OutputState)
-}
-
-// ModuleState is used to track all the state relevant to a single
-// module. Prior to Terraform 0.3, all state belonged to the "root"
-// module.
-type ModuleState struct {
- // Path is the import path from the root module. Module imports are
- // always disjoint, so the path represents a module tree.
- Path []string `json:"path"`
-
- // Locals are kept only transiently in-memory, because we can always
- // re-compute them.
- Locals map[string]interface{} `json:"-"`
-
- // Outputs declared by the module and maintained for each module
- // even though only the root module technically needs to be kept.
- // This allows operators to inspect values at the boundaries.
- Outputs map[string]*OutputState `json:"outputs"`
-
- // Resources is a mapping of the logically named resource to
- // the state of the resource. Each resource may actually have
- // N instances underneath, although a user only needs to think
- // about the 1:1 case.
- Resources map[string]*ResourceState `json:"resources"`
-
- // Dependencies are a list of things that this module relies on
- // existing to remain intact. For example: a module may depend
- // on a VPC ID given by an aws_vpc resource.
- //
- // Terraform uses this information to build valid destruction
- // orders and to warn the user if they're destroying a module that
- // another resource depends on.
- //
- // Things can be put into this list that may not be managed by
- // Terraform. If Terraform doesn't find a matching ID in the
- // overall state, then it assumes it isn't managed and doesn't
- // worry about it.
- Dependencies []string `json:"depends_on"`
-
- mu sync.Mutex
-}
-
-func (s *ModuleState) Lock() { s.mu.Lock() }
-func (s *ModuleState) Unlock() { s.mu.Unlock() }
-
-// Equal tests whether one module state is equal to another.
-func (m *ModuleState) Equal(other *ModuleState) bool {
- m.Lock()
- defer m.Unlock()
-
- // Paths must be equal
- if !reflect.DeepEqual(m.Path, other.Path) {
-  return false
- }
-
- // Outputs must be equal
- if len(m.Outputs) != len(other.Outputs) {
-  return false
- }
- for k, v := range m.Outputs {
-  if !other.Outputs[k].Equal(v) {
-   return false
-  }
- }
-
- // Dependencies must be equal. This sorts them in place, but
- // that shouldn't cause any problems.
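The dependency comparison that follows sorts both lists in place and then requires identical length and elements. A standalone sketch of that pattern:

package main

import (
	"fmt"
	"sort"
)

// equalDeps reports whether two dependency lists contain the same set of
// strings. Note that, like the code above, it mutates its inputs by sorting.
func equalDeps(a, b []string) bool {
	sort.Strings(a)
	sort.Strings(b)
	if len(a) != len(b) {
		return false
	}
	for i, d := range a {
		if b[i] != d {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(equalDeps([]string{"b", "a"}, []string{"a", "b"})) // true
}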
- sort.Strings(m.Dependencies) - sort.Strings(other.Dependencies) - if len(m.Dependencies) != len(other.Dependencies) { - return false - } - for i, d := range m.Dependencies { - if other.Dependencies[i] != d { - return false - } - } - - // Resources must be equal - if len(m.Resources) != len(other.Resources) { - return false - } - for k, r := range m.Resources { - otherR, ok := other.Resources[k] - if !ok { - return false - } - - if !r.Equal(otherR) { - return false - } - } - - return true -} - -// IsRoot says whether or not this module diff is for the root module. -func (m *ModuleState) IsRoot() bool { - m.Lock() - defer m.Unlock() - return reflect.DeepEqual(m.Path, rootModulePath) -} - -// IsDescendent returns true if other is a descendent of this module. -func (m *ModuleState) IsDescendent(other *ModuleState) bool { - m.Lock() - defer m.Unlock() - - i := len(m.Path) - return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path) -} - -// Orphans returns a list of keys of resources that are in the State -// but aren't present in the configuration itself. Hence, these keys -// represent the state of resources that are orphans. -func (m *ModuleState) Orphans(c *configs.Module) []addrs.ResourceInstance { - m.Lock() - defer m.Unlock() - - inConfig := make(map[string]struct{}) - if c != nil { - for _, r := range c.ManagedResources { - inConfig[r.Addr().String()] = struct{}{} - } - for _, r := range c.DataResources { - inConfig[r.Addr().String()] = struct{}{} - } - } - - var result []addrs.ResourceInstance - for k := range m.Resources { - // Since we've not yet updated state to use our new address format, - // we need to do some shimming here. - legacyAddr, err := parseResourceAddressInternal(k) - if err != nil { - // Suggests that the user tampered with the state, since we always - // generate valid internal addresses. - log.Printf("ModuleState has invalid resource key %q. Ignoring.", k) - continue - } - - addr := legacyAddr.AbsResourceInstanceAddr().Resource - compareKey := addr.Resource.String() // compare by resource address, ignoring instance key - if _, exists := inConfig[compareKey]; !exists { - result = append(result, addr) - } - } - return result -} - -// RemovedOutputs returns a list of outputs that are in the State but aren't -// present in the configuration itself. -func (s *ModuleState) RemovedOutputs(outputs map[string]*configs.Output) []addrs.OutputValue { - if outputs == nil { - // If we got no output map at all then we'll just treat our set of - // configured outputs as empty, since that suggests that they've all - // been removed by removing their containing module. - outputs = make(map[string]*configs.Output) - } - - s.Lock() - defer s.Unlock() - - var ret []addrs.OutputValue - for n := range s.Outputs { - if _, declared := outputs[n]; !declared { - ret = append(ret, addrs.OutputValue{ - Name: n, - }) - } - } - - return ret -} - -// View returns a view with the given resource prefix. 
-func (m *ModuleState) View(id string) *ModuleState { - if m == nil { - return m - } - - r := m.deepcopy() - for k, _ := range r.Resources { - if id == k || strings.HasPrefix(k, id+".") { - continue - } - - delete(r.Resources, k) - } - - return r -} - -func (m *ModuleState) init() { - m.Lock() - defer m.Unlock() - - if m.Path == nil { - m.Path = []string{} - } - if m.Outputs == nil { - m.Outputs = make(map[string]*OutputState) - } - if m.Resources == nil { - m.Resources = make(map[string]*ResourceState) - } - - if m.Dependencies == nil { - m.Dependencies = make([]string, 0) - } - - for _, rs := range m.Resources { - rs.init() - } -} - -func (m *ModuleState) deepcopy() *ModuleState { - if m == nil { - return nil - } - - stateCopy, err := copystructure.Config{Lock: true}.Copy(m) - if err != nil { - panic(err) - } - - return stateCopy.(*ModuleState) -} - -// prune is used to remove any resources that are no longer required -func (m *ModuleState) prune() { - m.Lock() - defer m.Unlock() - - for k, v := range m.Resources { - if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 { - delete(m.Resources, k) - continue - } - - v.prune() - } - - for k, v := range m.Outputs { - if v.Value == hcl2shim.UnknownVariableValue { - delete(m.Outputs, k) - } - } - - m.Dependencies = uniqueStrings(m.Dependencies) -} - -func (m *ModuleState) sort() { - for _, v := range m.Resources { - v.sort() - } -} - -func (m *ModuleState) String() string { - m.Lock() - defer m.Unlock() - - var buf bytes.Buffer - - if len(m.Resources) == 0 { - buf.WriteString("") - } - - names := make([]string, 0, len(m.Resources)) - for name, _ := range m.Resources { - names = append(names, name) - } - - sort.Sort(resourceNameSort(names)) - - for _, k := range names { - rs := m.Resources[k] - var id string - if rs.Primary != nil { - id = rs.Primary.ID - } - if id == "" { - id = "" - } - - taintStr := "" - if rs.Primary.Tainted { - taintStr = " (tainted)" - } - - deposedStr := "" - if len(rs.Deposed) > 0 { - deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed)) - } - - buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) - buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) - if rs.Provider != "" { - buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider)) - } - - var attributes map[string]string - if rs.Primary != nil { - attributes = rs.Primary.Attributes - } - attrKeys := make([]string, 0, len(attributes)) - for ak, _ := range attributes { - if ak == "id" { - continue - } - - attrKeys = append(attrKeys, ak) - } - - sort.Strings(attrKeys) - - for _, ak := range attrKeys { - av := attributes[ak] - buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) - } - - for idx, t := range rs.Deposed { - taintStr := "" - if t.Tainted { - taintStr = " (tainted)" - } - buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr)) - } - - if len(rs.Dependencies) > 0 { - buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) - for _, dep := range rs.Dependencies { - buf.WriteString(fmt.Sprintf(" %s\n", dep)) - } - } - } - - if len(m.Outputs) > 0 { - buf.WriteString("\nOutputs:\n\n") - - ks := make([]string, 0, len(m.Outputs)) - for k, _ := range m.Outputs { - ks = append(ks, k) - } - - sort.Strings(ks) - - for _, k := range ks { - v := m.Outputs[k] - switch vTyped := v.Value.(type) { - case string: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case []interface{}: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case map[string]interface{}: - var mapKeys []string - for key, _ := range 
vTyped { - mapKeys = append(mapKeys, key) - } - sort.Strings(mapKeys) - - var mapBuf bytes.Buffer - mapBuf.WriteString("{") - for _, key := range mapKeys { - mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) - } - mapBuf.WriteString("}") - - buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) - } - } - } - - return buf.String() -} - -func (m *ModuleState) Empty() bool { - return len(m.Locals) == 0 && len(m.Outputs) == 0 && len(m.Resources) == 0 -} - -// ResourceStateKey is a structured representation of the key used for the -// ModuleState.Resources mapping -type ResourceStateKey struct { - Name string - Type string - Mode ResourceMode - Index int -} - -// Equal determines whether two ResourceStateKeys are the same -func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool { - if rsk == nil || other == nil { - return false - } - if rsk.Mode != other.Mode { - return false - } - if rsk.Type != other.Type { - return false - } - if rsk.Name != other.Name { - return false - } - if rsk.Index != other.Index { - return false - } - return true -} - -func (rsk *ResourceStateKey) String() string { - if rsk == nil { - return "" - } - var prefix string - switch rsk.Mode { - case ManagedResourceMode: - prefix = "" - case DataResourceMode: - prefix = "data." - default: - panic(fmt.Errorf("unknown resource mode %s", rsk.Mode)) - } - if rsk.Index == -1 { - return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name) - } - return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index) -} - -// ParseResourceStateKey accepts a key in the format used by -// ModuleState.Resources and returns a resource name and resource index. In the -// state, a resource has the format "type.name.index" or "type.name". In the -// latter case, the index is returned as -1. -func ParseResourceStateKey(k string) (*ResourceStateKey, error) { - parts := strings.Split(k, ".") - mode := ManagedResourceMode - if len(parts) > 0 && parts[0] == "data" { - mode = DataResourceMode - // Don't need the constant "data" prefix for parsing - // now that we've figured out the mode. - parts = parts[1:] - } - if len(parts) < 2 || len(parts) > 3 { - return nil, fmt.Errorf("Malformed resource state key: %s", k) - } - rsk := &ResourceStateKey{ - Mode: mode, - Type: parts[0], - Name: parts[1], - Index: -1, - } - if len(parts) == 3 { - index, err := strconv.Atoi(parts[2]) - if err != nil { - return nil, fmt.Errorf("Malformed resource state key index: %s", k) - } - rsk.Index = index - } - return rsk, nil -} - -// ResourceState holds the state of a resource that is used so that -// a provider can find and manage an existing resource as well as for -// storing attributes that are used to populate variables of child -// resources. -// -// Attributes has attributes about the created resource that are -// queryable in interpolation: "${type.id.attr}" -// -// Extra is just extra data that a provider can return that we store -// for later, but is not exposed in any way to the user. -// -type ResourceState struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. - Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). 
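For reference, ParseResourceStateKey above accepts keys shaped like "type.name" and "type.name.N", plus the same forms with a "data." prefix selecting the data resource mode. A self-contained sketch of that grammar; the key type and names here are illustrative:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

type key struct {
	Data  bool
	Type  string
	Name  string
	Index int
}

// parseKey mirrors the grammar above: an optional "data." prefix, a type,
// a name, and an optional integer index (reported as -1 when absent).
func parseKey(k string) (*key, error) {
	parts := strings.Split(k, ".")
	data := false
	if len(parts) > 0 && parts[0] == "data" {
		data = true
		parts = parts[1:]
	}
	if len(parts) < 2 || len(parts) > 3 {
		return nil, fmt.Errorf("malformed resource state key: %s", k)
	}
	idx := -1
	if len(parts) == 3 {
		n, err := strconv.Atoi(parts[2])
		if err != nil {
			return nil, fmt.Errorf("malformed index in key: %s", k)
		}
		idx = n
	}
	return &key{Data: data, Type: parts[0], Name: parts[1], Index: idx}, nil
}

func main() {
	k, _ := parseKey("data.aws_ami.ubuntu.2")
	fmt.Printf("%+v\n", *k) // {Data:true Type:aws_ami Name:ubuntu Index:2}
}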
- // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instances on which providers will act. - Primary *InstanceState `json:"primary"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. - // - // If there were problems creating the replacement Primary, the Deposed - // instance and the (now tainted) replacement Primary will be swapped so the - // tainted replacement will be cleaned up instead. - // - // An instance will remain in the Deposed list until it is successfully - // destroyed and purged. - Deposed []*InstanceState `json:"deposed"` - - // Provider is used when a resource is connected to a provider with an alias. - // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider"` - - mu sync.Mutex -} - -func (s *ResourceState) Lock() { s.mu.Lock() } -func (s *ResourceState) Unlock() { s.mu.Unlock() } - -// Equal tests whether two ResourceStates are equal. -func (s *ResourceState) Equal(other *ResourceState) bool { - s.Lock() - defer s.Unlock() - - if s.Type != other.Type { - return false - } - - if s.Provider != other.Provider { - return false - } - - // Dependencies must be equal - sort.Strings(s.Dependencies) - sort.Strings(other.Dependencies) - if len(s.Dependencies) != len(other.Dependencies) { - return false - } - for i, d := range s.Dependencies { - if other.Dependencies[i] != d { - return false - } - } - - // States must be equal - if !s.Primary.Equal(other.Primary) { - return false - } - - return true -} - -// Taint marks a resource as tainted. -func (s *ResourceState) Taint() { - s.Lock() - defer s.Unlock() - - if s.Primary != nil { - s.Primary.Tainted = true - } -} - -// Untaint unmarks a resource as tainted. -func (s *ResourceState) Untaint() { - s.Lock() - defer s.Unlock() - - if s.Primary != nil { - s.Primary.Tainted = false - } -} - -// ProviderAddr returns the provider address for the receiver, by parsing the -// string representation saved in state. An error can be returned if the -// value in state is corrupt. 
-func (s *ResourceState) ProviderAddr() (addrs.AbsProviderConfig, error) { - var diags tfdiags.Diagnostics - - str := s.Provider - traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(travDiags) - if travDiags.HasErrors() { - return addrs.AbsProviderConfig{}, diags.Err() - } - - addr, addrDiags := addrs.ParseAbsProviderConfig(traversal) - diags = diags.Append(addrDiags) - return addr, diags.Err() -} - -func (s *ResourceState) init() { - s.Lock() - defer s.Unlock() - - if s.Primary == nil { - s.Primary = &InstanceState{} - } - s.Primary.init() - - if s.Dependencies == nil { - s.Dependencies = []string{} - } - - if s.Deposed == nil { - s.Deposed = make([]*InstanceState, 0) - } -} - -func (s *ResourceState) deepcopy() *ResourceState { - copy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(err) - } - - return copy.(*ResourceState) -} - -// prune is used to remove any instances that are no longer required -func (s *ResourceState) prune() { - s.Lock() - defer s.Unlock() - - n := len(s.Deposed) - for i := 0; i < n; i++ { - inst := s.Deposed[i] - if inst == nil || inst.ID == "" { - copy(s.Deposed[i:], s.Deposed[i+1:]) - s.Deposed[n-1] = nil - n-- - i-- - } - } - s.Deposed = s.Deposed[:n] - - s.Dependencies = uniqueStrings(s.Dependencies) -} - -func (s *ResourceState) sort() { - s.Lock() - defer s.Unlock() - - sort.Strings(s.Dependencies) -} - -func (s *ResourceState) String() string { - s.Lock() - defer s.Unlock() - - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("Type = %s", s.Type)) - return buf.String() -} - -// InstanceState is used to track the unique state information belonging -// to a given instance. -type InstanceState struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes"` - - // Ephemeral is used to store any state associated with this instance - // that is necessary for the Terraform run to complete, but is not - // persisted to a state file. - Ephemeral EphemeralState `json:"-"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code. The value here must only contain Go primitives - // and collections. - Meta map[string]interface{} `json:"meta"` - - ProviderMeta cty.Value - - // Tainted is used to mark a resource for recreation. - Tainted bool `json:"tainted"` - - mu sync.Mutex -} - -func (s *InstanceState) Lock() { s.mu.Lock() } -func (s *InstanceState) Unlock() { s.mu.Unlock() } - -func (s *InstanceState) init() { - s.Lock() - defer s.Unlock() - - if s.Attributes == nil { - s.Attributes = make(map[string]string) - } - if s.Meta == nil { - s.Meta = make(map[string]interface{}) - } - s.Ephemeral.init() -} - -// NewInstanceStateShimmedFromValue is a shim method to lower a new-style -// object value representing the attributes of an instance object into the -// legacy InstanceState representation. -// -// This is for shimming to old components only and should not be used in new code. 
-func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState { - attrs := hcl2shim.FlatmapValueFromHCL2(state) - return &InstanceState{ - ID: attrs["id"], - Attributes: attrs, - Meta: map[string]interface{}{ - "schema_version": schemaVersion, - }, - } -} - -// AttrsAsObjectValue shims from the legacy InstanceState representation to -// a new-style cty object value representation of the state attributes, using -// the given type for guidance. -// -// The given type must be the implied type of the schema of the resource type -// of the object whose state is being converted, or the result is undefined. -// -// This is for shimming from old components only and should not be used in -// new code. -func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) { - if s == nil { - // if the state is nil, we need to construct a complete cty.Value with - // null attributes, rather than a single cty.NullVal(ty) - s = &InstanceState{} - } - - if s.Attributes == nil { - s.Attributes = map[string]string{} - } - - // make sure ID is included in the attributes. The InstanceState.ID value - // takes precedence. - if s.ID != "" { - s.Attributes["id"] = s.ID - } - - return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty) -} - -// Copy all the Fields from another InstanceState -func (s *InstanceState) Set(from *InstanceState) { - s.Lock() - defer s.Unlock() - - from.Lock() - defer from.Unlock() - - s.ID = from.ID - s.Attributes = from.Attributes - s.Ephemeral = from.Ephemeral - s.Meta = from.Meta - s.Tainted = from.Tainted -} - -func (s *InstanceState) DeepCopy() *InstanceState { - copy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(err) - } - - return copy.(*InstanceState) -} - -func (s *InstanceState) Empty() bool { - if s == nil { - return true - } - s.Lock() - defer s.Unlock() - - return s.ID == "" -} - -func (s *InstanceState) Equal(other *InstanceState) bool { - // Short circuit some nil checks - if s == nil || other == nil { - return s == other - } - s.Lock() - defer s.Unlock() - - // IDs must be equal - if s.ID != other.ID { - return false - } - - // Attributes must be equal - if len(s.Attributes) != len(other.Attributes) { - return false - } - for k, v := range s.Attributes { - otherV, ok := other.Attributes[k] - if !ok { - return false - } - - if v != otherV { - return false - } - } - - // Meta must be equal - if len(s.Meta) != len(other.Meta) { - return false - } - if s.Meta != nil && other.Meta != nil { - // We only do the deep check if both are non-nil. If one is nil - // we treat it as equal since their lengths are both zero (check - // above). - // - // Since this can contain numeric values that may change types during - // serialization, let's compare the serialized values. - sMeta, err := json.Marshal(s.Meta) - if err != nil { - // marshaling primitives shouldn't ever error out - panic(err) - } - otherMeta, err := json.Marshal(other.Meta) - if err != nil { - panic(err) - } - - if !bytes.Equal(sMeta, otherMeta) { - return false - } - } - - if s.Tainted != other.Tainted { - return false - } - - return true -} - -// MergeDiff takes a ResourceDiff and merges the attributes into -// this resource state in order to generate a new state. This new -// state can be used to provide updated attribute lookups for -// variable interpolation. -// -// If the diff attribute requires computing the value, and hence -// won't be available until apply, the value is replaced with the -// computeID. 
-func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { - result := s.DeepCopy() - if result == nil { - result = new(InstanceState) - } - result.init() - - if s != nil { - s.Lock() - defer s.Unlock() - for k, v := range s.Attributes { - result.Attributes[k] = v - } - } - if d != nil { - for k, diff := range d.CopyAttributes() { - if diff.NewRemoved { - delete(result.Attributes, k) - continue - } - if diff.NewComputed { - result.Attributes[k] = hcl2shim.UnknownVariableValue - continue - } - - result.Attributes[k] = diff.New - } - } - - return result -} - -func (s *InstanceState) String() string { - notCreated := "" - - if s == nil { - return notCreated - } - - s.Lock() - defer s.Unlock() - - var buf bytes.Buffer - - if s.ID == "" { - return notCreated - } - - buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) - - attributes := s.Attributes - attrKeys := make([]string, 0, len(attributes)) - for ak, _ := range attributes { - if ak == "id" { - continue - } - - attrKeys = append(attrKeys, ak) - } - sort.Strings(attrKeys) - - for _, ak := range attrKeys { - av := attributes[ak] - buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av)) - } - - buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted)) - - return buf.String() -} - -// EphemeralState is used for transient state that is only kept in-memory -type EphemeralState struct { - // ConnInfo is used for the providers to export information which is - // used to connect to the resource for provisioning. For example, - // this could contain SSH or WinRM credentials. - ConnInfo map[string]string `json:"-"` - - // Type is used to specify the resource type for this instance. This is only - // required for import operations (as documented). If the documentation - // doesn't state that you need to set this, then don't worry about - // setting it. - Type string `json:"-"` -} - -func (e *EphemeralState) init() { - if e.ConnInfo == nil { - e.ConnInfo = make(map[string]string) - } -} - -func (e *EphemeralState) DeepCopy() *EphemeralState { - copy, err := copystructure.Config{Lock: true}.Copy(e) - if err != nil { - panic(err) - } - - return copy.(*EphemeralState) -} - -type jsonStateVersionIdentifier struct { - Version int `json:"version"` -} - -// Check if this is a V0 format - the magic bytes at the start of the file -// should be "tfstate" if so. We no longer support upgrading this type of -// state but return an error message explaining to a user how they can -// upgrade via the 0.6.x series. -func testForV0State(buf *bufio.Reader) error { - start, err := buf.Peek(len("tfstate")) - if err != nil { - return fmt.Errorf("Failed to check for magic bytes: %v", err) - } - if string(start) == "tfstate" { - return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" + - "format which was used prior to Terraform 0.3. Please upgrade\n" + - "this state file using Terraform 0.6.16 prior to using it with\n" + - "Terraform 0.7.") - } - - return nil -} - -// ErrNoState is returned by ReadState when the io.Reader contains no data -var ErrNoState = errors.New("no state") - -// ReadState reads a state structure out of a reader in the format that -// was written by WriteState. -func ReadState(src io.Reader) (*State, error) { - // check for a nil file specifically, since that produces a platform - // specific error if we try to use it in a bufio.Reader. 
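ReadState, continued below, buffers the whole document and decodes it twice: once to sniff the "version" field, then again with the decoder matching that version. A minimal sketch of the version-sniffing half:

package main

import (
	"encoding/json"
	"fmt"
)

// versionProbe decodes only the version field, ignoring the rest of the
// document, so the full decoder can be chosen afterwards.
type versionProbe struct {
	Version int `json:"version"`
}

func stateVersion(raw []byte) (int, error) {
	var p versionProbe
	if err := json.Unmarshal(raw, &p); err != nil {
		return 0, fmt.Errorf("decoding state file version failed: %v", err)
	}
	return p.Version, nil
}

func main() {
	v, err := stateVersion([]byte(`{"version": 3, "serial": 7}`))
	fmt.Println(v, err) // 3 <nil>
}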
- if f, ok := src.(*os.File); ok && f == nil { - return nil, ErrNoState - } - - buf := bufio.NewReader(src) - - if _, err := buf.Peek(1); err != nil { - if err == io.EOF { - return nil, ErrNoState - } - return nil, err - } - - if err := testForV0State(buf); err != nil { - return nil, err - } - - // If we are JSON we buffer the whole thing in memory so we can read it twice. - // This is suboptimal, but will work for now. - jsonBytes, err := ioutil.ReadAll(buf) - if err != nil { - return nil, fmt.Errorf("Reading state file failed: %v", err) - } - - versionIdentifier := &jsonStateVersionIdentifier{} - if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil { - return nil, fmt.Errorf("Decoding state file version failed: %v", err) - } - - var result *State - switch versionIdentifier.Version { - case 0: - return nil, fmt.Errorf("State version 0 is not supported as JSON.") - case 1: - v1State, err := ReadStateV1(jsonBytes) - if err != nil { - return nil, err - } - - v2State, err := upgradeStateV1ToV2(v1State) - if err != nil { - return nil, err - } - - v3State, err := upgradeStateV2ToV3(v2State) - if err != nil { - return nil, err - } - - // increment the Serial whenever we upgrade state - v3State.Serial++ - result = v3State - case 2: - v2State, err := ReadStateV2(jsonBytes) - if err != nil { - return nil, err - } - v3State, err := upgradeStateV2ToV3(v2State) - if err != nil { - return nil, err - } - - v3State.Serial++ - result = v3State - case 3: - v3State, err := ReadStateV3(jsonBytes) - if err != nil { - return nil, err - } - - result = v3State - default: - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - tfversion.SemVer.String(), versionIdentifier.Version) - } - - // If we reached this place we must have a result set - if result == nil { - panic("resulting state in load not set, assertion failed") - } - - // Prune the state when read it. Its possible to write unpruned states or - // for a user to make a state unpruned (nil-ing a module state for example). - result.prune() - - // Validate the state file is valid - if err := result.Validate(); err != nil { - return nil, err - } - - return result, nil -} - -func ReadStateV1(jsonBytes []byte) (*stateV1, error) { - v1State := &stateV1{} - if err := json.Unmarshal(jsonBytes, v1State); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - if v1State.Version != 1 { - return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+ - "read %d, expected 1", v1State.Version) - } - - return v1State, nil -} - -func ReadStateV2(jsonBytes []byte) (*State, error) { - state := &State{} - if err := json.Unmarshal(jsonBytes, state); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - // Check the version, this to ensure we don't read a future - // version that we don't understand - if state.Version > StateVersion { - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - tfversion.SemVer.String(), state.Version) - } - - // Make sure the version is semantic - if state.TFVersion != "" { - if _, err := version.NewVersion(state.TFVersion); err != nil { - return nil, fmt.Errorf( - "State contains invalid version: %s\n\n"+ - "Terraform validates the version format prior to writing it. This\n"+ - "means that this is invalid of the state becoming corrupted through\n"+ - "some external means. 
Please manually modify the Terraform version\n"+
-     "field to be a proper semantic version.",
-    state.TFVersion)
-  }
- }
-
- // catch any uninitialized fields in the state
- state.init()
-
- // Sort it
- state.sort()
-
- return state, nil
-}
-
-func ReadStateV3(jsonBytes []byte) (*State, error) {
- state := &State{}
- if err := json.Unmarshal(jsonBytes, state); err != nil {
-  return nil, fmt.Errorf("Decoding state file failed: %v", err)
- }
-
- // Check the version; this is to ensure we don't read a future
- // version that we don't understand.
- if state.Version > StateVersion {
-  return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
-   tfversion.SemVer.String(), state.Version)
- }
-
- // Make sure the version is semantic
- if state.TFVersion != "" {
-  if _, err := version.NewVersion(state.TFVersion); err != nil {
-   return nil, fmt.Errorf(
-    "State contains invalid version: %s\n\n"+
-     "Terraform validates the version format prior to writing it. This\n"+
-     "likely means the state has become corrupted through some\n"+
-     "external means. Please manually modify the Terraform version\n"+
-     "field to be a proper semantic version.",
-    state.TFVersion)
-  }
- }
-
- // catch any uninitialized fields in the state
- state.init()
-
- // Sort it
- state.sort()
-
- // Now we write the state back out to detect any changes in normalization.
- // If our state is now written out differently, bump the serial number to
- // prevent conflicts.
- var buf bytes.Buffer
- err := WriteState(state, &buf)
- if err != nil {
-  return nil, err
- }
-
- if !bytes.Equal(jsonBytes, buf.Bytes()) {
-  log.Println("[INFO] state modified during read or write. incrementing serial number")
-  state.Serial++
- }
-
- return state, nil
-}
-
-// WriteState writes a state to the given writer in JSON format.
-func WriteState(d *State, dst io.Writer) error {
- // writing a nil state is a noop.
- if d == nil {
-  return nil
- }
-
- // make sure we have no uninitialized fields
- d.init()
-
- // Make sure it is sorted
- d.sort()
-
- // Ensure the version is set
- d.Version = StateVersion
-
- // If the TFVersion is set, verify it. We used to just set the version
- // here, but this isn't safe since it changes the MD5 sum on some remote
- // state storage backends such as Atlas. We now leave it be if needed.
- if d.TFVersion != "" {
-  if _, err := version.NewVersion(d.TFVersion); err != nil {
-   return fmt.Errorf(
-    "Error writing state, invalid version: %s\n\n"+
-     "The Terraform version when writing the state must be a semantic\n"+
-     "version.",
-    d.TFVersion)
-  }
- }
-
- // Encode the data in a human-friendly way
- data, err := json.MarshalIndent(d, "", " ")
- if err != nil {
-  return fmt.Errorf("Failed to encode state: %s", err)
- }
-
- // We append a newline to the data because MarshalIndent doesn't
- data = append(data, '\n')
-
- // Write the data out to the dst
- if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil {
-  return fmt.Errorf("Failed to write state: %v", err)
- }
-
- return nil
-}
-
-// resourceNameSort implements sort.Interface to sort name parts lexically for
-// strings and numerically for integer indexes.
-type resourceNameSort []string - -func (r resourceNameSort) Len() int { return len(r) } -func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } - -func (r resourceNameSort) Less(i, j int) bool { - iParts := strings.Split(r[i], ".") - jParts := strings.Split(r[j], ".") - - end := len(iParts) - if len(jParts) < end { - end = len(jParts) - } - - for idx := 0; idx < end; idx++ { - if iParts[idx] == jParts[idx] { - continue - } - - // sort on the first non-matching part - iInt, iIntErr := strconv.Atoi(iParts[idx]) - jInt, jIntErr := strconv.Atoi(jParts[idx]) - - switch { - case iIntErr == nil && jIntErr == nil: - // sort numerically if both parts are integers - return iInt < jInt - case iIntErr == nil: - // numbers sort before strings - return true - case jIntErr == nil: - return false - default: - return iParts[idx] < jParts[idx] - } - } - - return r[i] < r[j] -} - -// moduleStateSort implements sort.Interface to sort module states -type moduleStateSort []*ModuleState - -func (s moduleStateSort) Len() int { - return len(s) -} - -func (s moduleStateSort) Less(i, j int) bool { - a := s[i] - b := s[j] - - // If either is nil, then the nil one is "less" than - if a == nil || b == nil { - return a == nil - } - - // If the lengths are different, then the shorter one always wins - if len(a.Path) != len(b.Path) { - return len(a.Path) < len(b.Path) - } - - // Otherwise, compare lexically - return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") -} - -func (s moduleStateSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -const stateValidateErrMultiModule = ` -Multiple modules with the same path: %s - -This means that there are multiple entries in the "modules" field -in your state file that point to the same module. This will cause Terraform -to behave in unexpected and error prone ways and is invalid. Please back up -and modify your state file manually to resolve this. -` diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go deleted file mode 100644 index 2dcb11b7..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go +++ /dev/null @@ -1,267 +0,0 @@ -package terraform - -import ( - "fmt" - "sort" -) - -// StateFilter is responsible for filtering and searching a state. -// -// This is a separate struct from State rather than a method on State -// because StateFilter might create sidecar data structures to optimize -// filtering on the state. -// -// If you change the State, the filter created is invalid and either -// Reset should be called or a new one should be allocated. StateFilter -// will not watch State for changes and do this for you. If you filter after -// changing the State without calling Reset, the behavior is not defined. -type StateFilter struct { - State *State -} - -// Filter takes the addresses specified by fs and finds all the matches. -// The values of fs are resource addressing syntax that can be parsed by -// ParseResourceAddress. -func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { - // Parse all the addresses - as := make([]*ResourceAddress, len(fs)) - for i, v := range fs { - a, err := ParseResourceAddress(v) - if err != nil { - return nil, fmt.Errorf("Error parsing address '%s': %s", v, err) - } - - as[i] = a - } - - // If we weren't given any filters, then we list all - if len(fs) == 0 { - as = append(as, &ResourceAddress{Index: -1}) - } - - // Filter each of the address. 
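The effect of resourceNameSort above is a natural ordering: dot-separated parts compare numerically when both parse as integers, and numbers sort before strings. A standalone re-implementation for illustration only:

package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// naturalLess compares dot-separated keys part by part, numerically where
// both parts are integers and lexically otherwise.
func naturalLess(a, b string) bool {
	ap, bp := strings.Split(a, "."), strings.Split(b, ".")
	for i := 0; i < len(ap) && i < len(bp); i++ {
		if ap[i] == bp[i] {
			continue
		}
		ai, aErr := strconv.Atoi(ap[i])
		bi, bErr := strconv.Atoi(bp[i])
		switch {
		case aErr == nil && bErr == nil:
			return ai < bi // both numeric: compare as integers
		case aErr == nil:
			return true // numbers sort before strings
		case bErr == nil:
			return false
		default:
			return ap[i] < bp[i]
		}
	}
	return a < b
}

func main() {
	keys := []string{"aws_instance.web.10", "aws_instance.web.2", "aws_instance.db"}
	sort.Slice(keys, func(i, j int) bool { return naturalLess(keys[i], keys[j]) })
	fmt.Println(keys) // [aws_instance.db aws_instance.web.2 aws_instance.web.10]
}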
We keep track of this in a map to - // strip duplicates. - resultSet := make(map[string]*StateFilterResult) - for _, a := range as { - for _, r := range f.filterSingle(a) { - resultSet[r.String()] = r - } - } - - // Make the result list - results := make([]*StateFilterResult, 0, len(resultSet)) - for _, v := range resultSet { - results = append(results, v) - } - - // Sort them and return - sort.Sort(StateFilterResultSlice(results)) - return results, nil -} - -func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { - // The slice to keep track of results - var results []*StateFilterResult - - // Go through modules first. - modules := make([]*ModuleState, 0, len(f.State.Modules)) - for _, m := range f.State.Modules { - if f.relevant(a, m) { - modules = append(modules, m) - - // Only add the module to the results if we haven't specified a type. - // We also ignore the root module. - if a.Type == "" && len(m.Path) > 1 { - results = append(results, &StateFilterResult{ - Path: m.Path[1:], - Address: (&ResourceAddress{Path: m.Path[1:]}).String(), - Value: m, - }) - } - } - } - - // With the modules set, go through all the resources within - // the modules to find relevant resources. - for _, m := range modules { - for n, r := range m.Resources { - // The name in the state contains valuable information. Parse. - key, err := ParseResourceStateKey(n) - if err != nil { - // If we get an error parsing, then just ignore it - // out of the state. - continue - } - - // Older states and test fixtures often don't contain the - // type directly on the ResourceState. We add this so StateFilter - // is a bit more robust. - if r.Type == "" { - r.Type = key.Type - } - - if f.relevant(a, r) { - if a.Name != "" && a.Name != key.Name { - // Name doesn't match - continue - } - - if a.Index >= 0 && key.Index != a.Index { - // Index doesn't match - continue - } - - if a.Name != "" && a.Name != key.Name { - continue - } - - // Build the address for this resource - addr := &ResourceAddress{ - Path: m.Path[1:], - Name: key.Name, - Type: key.Type, - Index: key.Index, - } - - // Add the resource level result - resourceResult := &StateFilterResult{ - Path: addr.Path, - Address: addr.String(), - Value: r, - } - if !a.InstanceTypeSet { - results = append(results, resourceResult) - } - - // Add the instances - if r.Primary != nil { - addr.InstanceType = TypePrimary - addr.InstanceTypeSet = false - results = append(results, &StateFilterResult{ - Path: addr.Path, - Address: addr.String(), - Parent: resourceResult, - Value: r.Primary, - }) - } - - for _, instance := range r.Deposed { - if f.relevant(a, instance) { - addr.InstanceType = TypeDeposed - addr.InstanceTypeSet = true - results = append(results, &StateFilterResult{ - Path: addr.Path, - Address: addr.String(), - Parent: resourceResult, - Value: instance, - }) - } - } - } - } - } - - return results -} - -// relevant checks for relevance of this address against the given value. -func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool { - switch v := raw.(type) { - case *ModuleState: - path := v.Path[1:] - - if len(addr.Path) > len(path) { - // Longer path in address means there is no way we match. - return false - } - - // Check for a prefix match - for i, p := range addr.Path { - if path[i] != p { - // Any mismatches don't match. - return false - } - } - - return true - case *ResourceState: - if addr.Type == "" { - // If we have no resource type, then we're interested in all! 
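Filter above de-duplicates by collecting results in a map keyed on each result's string form before flattening and sorting. The same pattern in miniature; the result type here is illustrative:

package main

import (
	"fmt"
	"sort"
)

type result struct{ addr string }

func (r result) String() string { return r.addr }

// dedup collapses duplicate results by their string key, then returns the
// unique keys in a stable, sorted order.
func dedup(in []result) []string {
	set := make(map[string]result)
	for _, r := range in {
		set[r.String()] = r
	}
	out := make([]string, 0, len(set))
	for k := range set {
		out = append(out, k)
	}
	sort.Strings(out)
	return out
}

func main() {
	rs := []result{{"aws_instance.a"}, {"aws_instance.b"}, {"aws_instance.a"}}
	fmt.Println(dedup(rs)) // [aws_instance.a aws_instance.b]
}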
- return true - } - - // If the type doesn't match we fail immediately - if v.Type != addr.Type { - return false - } - - return true - default: - // If we don't know about it, let's just say no - return false - } -} - -// StateFilterResult is a single result from a filter operation. Filter -// can match multiple things within a state (module, resource, instance, etc.) -// and this unifies that. -type StateFilterResult struct { - // Module path of the result - Path []string - - // Address is the address that can be used to reference this exact result. - Address string - - // Parent, if non-nil, is a parent of this result. For instances, the - // parent would be a resource. For resources, the parent would be - // a module. For modules, this is currently nil. - Parent *StateFilterResult - - // Value is the actual value. This must be type switched on. It can be - // any data structures that `State` can hold: `ModuleState`, - // `ResourceState`, `InstanceState`. - Value interface{} -} - -func (r *StateFilterResult) String() string { - return fmt.Sprintf("%T: %s", r.Value, r.Address) -} - -func (r *StateFilterResult) sortedType() int { - switch r.Value.(type) { - case *ModuleState: - return 0 - case *ResourceState: - return 1 - case *InstanceState: - return 2 - default: - return 50 - } -} - -// StateFilterResultSlice is a slice of results that implements -// sort.Interface. The sorting goal is what is most appealing to -// human output. -type StateFilterResultSlice []*StateFilterResult - -func (s StateFilterResultSlice) Len() int { return len(s) } -func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s StateFilterResultSlice) Less(i, j int) bool { - a, b := s[i], s[j] - - // if these address contain an index, we want to sort by index rather than name - addrA, errA := ParseResourceAddress(a.Address) - addrB, errB := ParseResourceAddress(b.Address) - if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index { - return addrA.Index < addrB.Index - } - - // If the addresses are different it is just lexographic sorting - if a.Address != b.Address { - return a.Address < b.Address - } - - // Addresses are the same, which means it matters on the type - return a.sortedType() < b.sortedType() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go deleted file mode 100644 index aa13cce8..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go +++ /dev/null @@ -1,189 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/mitchellh/copystructure" -) - -// upgradeStateV1ToV2 is used to upgrade a V1 state representation -// into a V2 state representation -func upgradeStateV1ToV2(old *stateV1) (*State, error) { - if old == nil { - return nil, nil - } - - remote, err := old.Remote.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - - modules := make([]*ModuleState, len(old.Modules)) - for i, module := range old.Modules { - upgraded, err := module.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - modules[i] = upgraded - } - if len(modules) == 0 { - modules = nil - } - - newState := &State{ - Version: 2, - Serial: old.Serial, - Remote: remote, - Modules: modules, - } - - newState.sort() - newState.init() - - return newState, nil -} - -func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) { - if old == 
nil { - return nil, nil - } - - config, err := copystructure.Copy(old.Config) - if err != nil { - return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err) - } - - return &RemoteState{ - Type: old.Type, - Config: config.(map[string]string), - }, nil -} - -func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) { - if old == nil { - return nil, nil - } - - pathRaw, err := copystructure.Copy(old.Path) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - path, ok := pathRaw.([]string) - if !ok { - return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") - } - if len(path) == 0 { - // We found some V1 states with a nil path. Assume root and catch - // duplicate path errors later (as part of Validate). - path = rootModulePath - } - - // Outputs needs upgrading to use the new structure - outputs := make(map[string]*OutputState) - for key, output := range old.Outputs { - outputs[key] = &OutputState{ - Type: "string", - Value: output, - Sensitive: false, - } - } - - resources := make(map[string]*ResourceState) - for key, oldResource := range old.Resources { - upgraded, err := oldResource.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - resources[key] = upgraded - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - - return &ModuleState{ - Path: path, - Outputs: outputs, - Resources: resources, - Dependencies: dependencies.([]string), - }, nil -} - -func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) { - if old == nil { - return nil, nil - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - primary, err := old.Primary.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - deposed := make([]*InstanceState, len(old.Deposed)) - for i, v := range old.Deposed { - upgraded, err := v.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - deposed[i] = upgraded - } - if len(deposed) == 0 { - deposed = nil - } - - return &ResourceState{ - Type: old.Type, - Dependencies: dependencies.([]string), - Primary: primary, - Deposed: deposed, - Provider: old.Provider, - }, nil -} - -func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) { - if old == nil { - return nil, nil - } - - attributes, err := copystructure.Copy(old.Attributes) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - ephemeral, err := old.Ephemeral.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - meta, err := copystructure.Copy(old.Meta) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - newMeta := make(map[string]interface{}) - for k, v := range meta.(map[string]string) { - newMeta[k] = v - } - - return &InstanceState{ - ID: old.ID, - Attributes: attributes.(map[string]string), - Ephemeral: *ephemeral, - Meta: newMeta, - }, nil -} - -func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) { - connInfo, err := copystructure.Copy(old.ConnInfo) - if err != nil { - return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err) - } - return &EphemeralState{ - ConnInfo: connInfo.(map[string]string), - }, nil -} 
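The V1-to-V2 output upgrade in the file above is the clearest shape change: V1 stored each output as a bare string, while V2 wraps it in a typed structure. A self-contained sketch, where outputV2 stands in for the real OutputState:

package main

import "fmt"

type outputV2 struct {
	Type      string
	Value     interface{}
	Sensitive bool
}

// upgradeOutputs mirrors the loop in moduleStateV1.upgradeToV2: every V1
// output becomes a string-typed, non-sensitive V2 output.
func upgradeOutputs(old map[string]string) map[string]*outputV2 {
	out := make(map[string]*outputV2, len(old))
	for key, value := range old {
		out[key] = &outputV2{Type: "string", Value: value, Sensitive: false}
	}
	return out
}

func main() {
	v2 := upgradeOutputs(map[string]string{"address": "10.0.0.5"})
	fmt.Printf("%+v\n", *v2["address"]) // {Type:string Value:10.0.0.5 Sensitive:false}
}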
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go deleted file mode 100644 index e52d35fc..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go +++ /dev/null @@ -1,142 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "regexp" - "sort" - "strconv" - "strings" -) - -// The upgrade process from V2 to V3 state does not affect the structure, -// so we do not need to redeclare all of the structs involved - we just -// take a deep copy of the old structure and assert the version number is -// as we expect. -func upgradeStateV2ToV3(old *State) (*State, error) { - new := old.DeepCopy() - - // Ensure the copied version is v2 before attempting to upgrade - if new.Version != 2 { - return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " + - "a state which is not version 2.") - } - - // Set the new version number - new.Version = 3 - - // Change the counts for things which look like maps to use the % - // syntax. Remove counts for empty collections - they will be added - // back in later. - for _, module := range new.Modules { - for _, resource := range module.Resources { - // Upgrade Primary - if resource.Primary != nil { - upgradeAttributesV2ToV3(resource.Primary) - } - - // Upgrade Deposed - if resource.Deposed != nil { - for _, deposed := range resource.Deposed { - upgradeAttributesV2ToV3(deposed) - } - } - } - } - - return new, nil -} - -func upgradeAttributesV2ToV3(instanceState *InstanceState) error { - collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) - collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) - - // Identify the key prefix of anything which is a collection - var collectionKeyPrefixes []string - for key := range instanceState.Attributes { - if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) - } - } - sort.Strings(collectionKeyPrefixes) - - log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) - - // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not - // run very often. - for _, prefix := range collectionKeyPrefixes { - // First get the actual keys that belong to this prefix - var potentialKeysMatching []string - for key := range instanceState.Attributes { - if strings.HasPrefix(key, prefix) { - potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) - } - } - sort.Strings(potentialKeysMatching) - - var actualKeysMatching []string - for _, key := range potentialKeysMatching { - if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - actualKeysMatching = append(actualKeysMatching, submatches[0][1]) - } else { - if key != "#" { - actualKeysMatching = append(actualKeysMatching, key) - } - } - } - actualKeysMatching = uniqueSortedStrings(actualKeysMatching) - - // Now inspect the keys in order to determine whether this is most likely to be - // a map, list or set. There is room for error here, so we log in each case. If - // there is no method of telling, we remove the key from the InstanceState in - // order that it will be recreated. Again, this could be rolled into fewer loops - // but we prefer clarity. - - oldCountKey := fmt.Sprintf("%s#", prefix) - - // First, detect "obvious" maps - which have non-numeric keys (mostly). 
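The detection step below renames a collection's count key from the V2 ".#" form to the V3 ".%" form once any element key turns out to be non-numeric. A standalone sketch of that migration; the helper and variable names are illustrative:

package main

import (
	"fmt"
	"strconv"
)

// migrateCountKey treats the collection at prefix as a map when any of its
// element keys fails to parse as an integer, and renames its count
// attribute accordingly.
func migrateCountKey(attrs map[string]string, prefix string, elemKeys []string) {
	isMap := false
	for _, k := range elemKeys {
		if _, err := strconv.Atoi(k); err != nil {
			isMap = true
		}
	}
	if isMap {
		attrs[prefix+"%"] = attrs[prefix+"#"]
		delete(attrs, prefix+"#")
	}
}

func main() {
	attrs := map[string]string{"tags.#": "2", "tags.name": "web", "tags.env": "prod"}
	migrateCountKey(attrs, "tags.", []string{"name", "env"})
	fmt.Println(attrs["tags.%"], attrs["tags.#"]) // 2, and the old key is gone
}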
- hasNonNumericKeys := false - for _, key := range actualKeysMatching { - if _, err := strconv.Atoi(key); err != nil { - hasNonNumericKeys = true - } - } - if hasNonNumericKeys { - newCountKey := fmt.Sprintf("%s%%", prefix) - - instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", - strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) - } - - // Now detect empty collections and remove them from state. - if len(actualKeysMatching) == 0 { - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.", - strings.TrimSuffix(prefix, ".")) - } - } - - return nil -} - -// uniqueSortedStrings removes duplicates from a slice of strings and returns -// a sorted slice of the unique strings. -func uniqueSortedStrings(input []string) []string { - uniquemap := make(map[string]struct{}) - for _, str := range input { - uniquemap[str] = struct{}{} - } - - output := make([]string, len(uniquemap)) - - i := 0 - for key := range uniquemap { - output[i] = key - i = i + 1 - } - - sort.Strings(output) - return output -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_v1.go b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go deleted file mode 100644 index 68cffb41..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state_v1.go +++ /dev/null @@ -1,145 +0,0 @@ -package terraform - -// stateV1 keeps track of a snapshot state-of-the-world that Terraform -// can use to keep track of what real world resources it is actually -// managing. -// -// stateV1 is _only used for the purposes of backwards compatibility -// and is no longer used in Terraform. -// -// For the upgrade process, see state_upgrade_v1_to_v2.go -type stateV1 struct { - // Version is the protocol version. "1" for a StateV1. - Version int `json:"version"` - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial int64 `json:"serial"` - - // Remote is used to track the metadata required to - // pull and push state files from a remote storage endpoint. - Remote *remoteStateV1 `json:"remote,omitempty"` - - // Modules contains all the modules in a breadth-first order - Modules []*moduleStateV1 `json:"modules"` -} - -type remoteStateV1 struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` -} - -type moduleStateV1 struct { - // Path is the import path from the root module. Module imports are - // always disjoint, so the path represents a module tree - Path []string `json:"path"` - - // Outputs declared by the module and maintained for each module - // even though only the root module technically needs to be kept. - // This allows operators to inspect values at the boundaries. - Outputs map[string]string `json:"outputs"` - - // Resources is a mapping of the logically named resource to - // the state of the resource. Each resource may actually have - // N instances underneath, although a user only needs to think - // about the 1:1 case. - Resources map[string]*resourceStateV1 `json:"resources"` - - // Dependencies are a list of things that this module relies on - // existing to remain intact. For example: a module may depend - // on a VPC ID given by an aws_vpc resource. - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a module that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on,omitempty"` -} - -type resourceStateV1 struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. - Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on,omitempty"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instance on which providers will act. - Primary *instanceStateV1 `json:"primary"` - - // Tainted is used to track any underlying instances that - // have been created but are in a bad or unknown state and - // need to be cleaned up subsequently. In the - // standard case, there is at most a single instance. - // However, in pathological cases, it is possible for the number - // of instances to accumulate. - Tainted []*instanceStateV1 `json:"tainted,omitempty"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. If there were problems creating the - // replacement, the instance remains in the Deposed list so it can be - // destroyed in a future run. Functionally, Deposed instances are very - // similar to Tainted instances in that Terraform is only tracking them in - // order to remember to destroy them. - Deposed []*instanceStateV1 `json:"deposed,omitempty"` - - // Provider is used when a resource is connected to a provider with an alias. - // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider,omitempty"` -} - -type instanceStateV1 struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes,omitempty"` - - // Ephemeral is used to store any state associated with this instance - // that is necessary for the Terraform run to complete, but is not - // persisted to a state file. - Ephemeral ephemeralStateV1 `json:"-"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code. - Meta map[string]string `json:"meta,omitempty"` -} - -type ephemeralStateV1 struct { - // ConnInfo is used for the providers to export information which is - // used to connect to the resource for provisioning. For example, - // this could contain SSH or WinRM credentials. - ConnInfo map[string]string `json:"-"` -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/testing.go b/vendor/github.com/hashicorp/terraform/terraform/testing.go deleted file mode 100644 index 3f0418d9..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/testing.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -import ( - "os" - "testing" -) - -// TestStateFile writes the given state to the path. -func TestStateFile(t *testing.T, path string, state *State) { - f, err := os.Create(path) - if err != nil { - t.Fatalf("err: %s", err) - } - defer f.Close() - - if err := WriteState(state, f); err != nil { - t.Fatalf("err: %s", err) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go deleted file mode 100644 index d587c89e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform.go +++ /dev/null @@ -1,63 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/helper/logging" -) - -// GraphTransformer is the interface that transformers implement. This -// interface is only for transforms that need entire graph visibility. -type GraphTransformer interface { - Transform(*Graph) error -} - -// GraphVertexTransformer is an interface that transforms a single -// Vertex within the graph. This is a specialization of GraphTransformer -// that makes it easy to do vertex replacement. -// -// The GraphTransformer that runs through the GraphVertexTransformers is -// VertexTransformer. -type GraphVertexTransformer interface { - Transform(dag.Vertex) (dag.Vertex, error) -} - -// GraphTransformIf is a helper function that conditionally returns the -// given GraphTransformer. This is useful for calling a sequence of transforms -// inline without having to split it up into multiple append() calls.
-func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer { - if f() { - return then - } - - return nil -} - -type graphTransformerMulti struct { - Transforms []GraphTransformer -} - -func (t *graphTransformerMulti) Transform(g *Graph) error { - var lastStepStr string - for _, t := range t.Transforms { - log.Printf("[TRACE] (graphTransformerMulti) Executing graph transform %T", t) - if err := t.Transform(g); err != nil { - return err - } - if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { - log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T with new graph:\n%s ------", t, logging.Indent(thisStepStr)) - lastStepStr = thisStepStr - } else { - log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T (no changes)", t) - } - } - - return nil -} - -// GraphTransformMulti combines multiple graph transformers into a single -// GraphTransformer that runs all the individual graph transformers. -func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer { - return &graphTransformerMulti{Transforms: ts} -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go deleted file mode 100644 index d2e3d69d..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go +++ /dev/null @@ -1,16 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" -) - -// GraphNodeAttachProvider is an interface that must be implemented by nodes -// that want provider configurations attached. -type GraphNodeAttachProvider interface { - // ProviderName with no module prefix. Example: "aws". - ProviderAddr() addrs.AbsProviderConfig - - // Sets the configuration - AttachProvider(*configs.Provider) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider_meta.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider_meta.go deleted file mode 100644 index 4eab86d7..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider_meta.go +++ /dev/null @@ -1,15 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" -) - -// GraphNodeAttachProviderMetaConfigs is an interface that must be implemented -// by nodes that want provider meta configurations attached. -type GraphNodeAttachProviderMetaConfigs interface { - GraphNodeConfigResource - - // Sets the configuration - AttachProviderMetaConfigs(map[addrs.Provider]*configs.ProviderMeta) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go deleted file mode 100644 index 37afbde2..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go +++ /dev/null @@ -1,110 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes -// that want resource configurations attached. 
-type GraphNodeAttachResourceConfig interface { - GraphNodeConfigResource - - // Sets the configuration - AttachResourceConfig(*configs.Resource) -} - -// AttachResourceConfigTransformer goes through the graph and attaches -// resource configuration structures to nodes that implement -// GraphNodeAttachManagedResourceConfig or GraphNodeAttachDataResourceConfig. -// -// The attached configuration structures are directly from the configuration. -// If they're going to be modified, a copy should be made. -type AttachResourceConfigTransformer struct { - Config *configs.Config // Config is the root node in the config tree -} - -func (t *AttachResourceConfigTransformer) Transform(g *Graph) error { - - // Go through and find GraphNodeAttachResource - for _, v := range g.Vertices() { - // Only care about GraphNodeAttachResource implementations - arn, ok := v.(GraphNodeAttachResourceConfig) - if !ok { - continue - } - - // Determine what we're looking for - addr := arn.ResourceAddr() - - // Get the configuration. - config := t.Config.Descendent(addr.Module) - if config == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: %q (%T) has no configuration available", dag.VertexName(v), v) - continue - } - - for _, r := range config.Module.ManagedResources { - rAddr := r.Addr() - - if rAddr != addr.Resource { - // Not the same resource - continue - } - - log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %s", dag.VertexName(v), v, r.DeclRange) - arn.AttachResourceConfig(r) - - // attach the provider_meta info - if gnapmc, ok := v.(GraphNodeAttachProviderMetaConfigs); ok { - log.Printf("[TRACE] AttachResourceConfigTransformer: attaching provider meta configs to %s", dag.VertexName(v)) - if config == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: no config set on the transformer for %s", dag.VertexName(v)) - continue - } - if config.Module == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: no module in config for %s", dag.VertexName(v)) - continue - } - if config.Module.ProviderMetas == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: no provider metas defined for %s", dag.VertexName(v)) - continue - } - gnapmc.AttachProviderMetaConfigs(config.Module.ProviderMetas) - } - } - for _, r := range config.Module.DataResources { - rAddr := r.Addr() - - if rAddr != addr.Resource { - // Not the same resource - continue - } - - log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %#v", dag.VertexName(v), v, r.DeclRange) - arn.AttachResourceConfig(r) - - // attach the provider_meta info - if gnapmc, ok := v.(GraphNodeAttachProviderMetaConfigs); ok { - log.Printf("[TRACE] AttachResourceConfigTransformer: attaching provider meta configs to %s", dag.VertexName(v)) - if config == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: no config set on the transformer for %s", dag.VertexName(v)) - continue - } - if config.Module == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: no module in config for %s", dag.VertexName(v)) - continue - } - if config.Module.ProviderMetas == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: no provider metas defined for %s", dag.VertexName(v)) - continue - } - gnapmc.AttachProviderMetaConfigs(config.Module.ProviderMetas) - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go deleted file mode 100644 index 
1499f920..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go +++ /dev/null @@ -1,101 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeAttachResourceSchema is an interface implemented by node types -// that need a resource schema attached. -type GraphNodeAttachResourceSchema interface { - GraphNodeConfigResource - GraphNodeProviderConsumer - - AttachResourceSchema(schema *configschema.Block, version uint64) -} - -// GraphNodeAttachProviderConfigSchema is an interface implemented by node types -// that need a provider configuration schema attached. -type GraphNodeAttachProviderConfigSchema interface { - GraphNodeProvider - - AttachProviderConfigSchema(*configschema.Block) -} - -// GraphNodeAttachProvisionerSchema is an interface implemented by node types -// that need one or more provisioner schemas attached. -type GraphNodeAttachProvisionerSchema interface { - ProvisionedBy() []string - - // SetProvisionerSchema is called during transform for each provisioner - // type returned from ProvisionedBy, providing the configuration schema - // for each provisioner in turn. The implementer should save these for - // later use in evaluating provisioner configuration blocks. - AttachProvisionerSchema(name string, schema *configschema.Block) -} - -// AttachSchemaTransformer finds nodes that implement -// GraphNodeAttachResourceSchema, GraphNodeAttachProviderConfigSchema, or -// GraphNodeAttachProvisionerSchema, looks up the needed schemas for each -// and then passes them to a method implemented by the node. -type AttachSchemaTransformer struct { - Schemas *Schemas - Config *configs.Config -} - -func (t *AttachSchemaTransformer) Transform(g *Graph) error { - if t.Schemas == nil { - // Should never happen with a reasonable caller, but we'll return a - // proper error here anyway so that we'll fail gracefully. 
- return fmt.Errorf("AttachSchemaTransformer used with nil Schemas") - } - - for _, v := range g.Vertices() { - - if tv, ok := v.(GraphNodeAttachResourceSchema); ok { - addr := tv.ResourceAddr() - mode := addr.Resource.Mode - typeName := addr.Resource.Type - providerFqn := tv.Provider() - - schema, version := t.Schemas.ResourceTypeConfig(providerFqn, mode, typeName) - if schema == nil { - log.Printf("[ERROR] AttachSchemaTransformer: No resource schema available for %s", addr) - continue - } - log.Printf("[TRACE] AttachSchemaTransformer: attaching resource schema to %s", dag.VertexName(v)) - tv.AttachResourceSchema(schema, version) - } - - if tv, ok := v.(GraphNodeAttachProviderConfigSchema); ok { - providerAddr := tv.ProviderAddr() - schema := t.Schemas.ProviderConfig(providerAddr.Provider) - - if schema == nil { - log.Printf("[ERROR] AttachSchemaTransformer: No provider config schema available for %s", providerAddr) - continue - } - log.Printf("[TRACE] AttachSchemaTransformer: attaching provider config schema to %s", dag.VertexName(v)) - tv.AttachProviderConfigSchema(schema) - } - - if tv, ok := v.(GraphNodeAttachProvisionerSchema); ok { - names := tv.ProvisionedBy() - for _, name := range names { - schema := t.Schemas.ProvisionerConfig(name) - if schema == nil { - log.Printf("[ERROR] AttachSchemaTransformer: No schema available for provisioner %q on %q", name, dag.VertexName(v)) - continue - } - log.Printf("[TRACE] AttachSchemaTransformer: attaching provisioner %q config schema to %s", name, dag.VertexName(v)) - tv.AttachProvisionerSchema(name, schema) - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go deleted file mode 100644 index 3af7b989..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go +++ /dev/null @@ -1,68 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" -) - -// GraphNodeAttachResourceState is an interface that can be implemented -// to request that a ResourceState is attached to the node. -// -// Due to a historical naming inconsistency, the type ResourceState actually -// represents the state for a particular _instance_, while InstanceState -// represents the values for that instance during a particular phase -// (e.g. primary vs. deposed). Consequently, GraphNodeAttachResourceState -// is supported only for nodes that represent resource instances, even though -// the name might suggest it is for containing resources. -type GraphNodeAttachResourceState interface { - GraphNodeResourceInstance - - // Sets the state - AttachResourceState(*states.Resource) -} - -// AttachStateTransformer goes through the graph and attaches -// state to nodes that implement the interfaces above. -type AttachStateTransformer struct { - State *states.State // State is the root state -} - -func (t *AttachStateTransformer) Transform(g *Graph) error { - // If no state, then nothing to do - if t.State == nil { - log.Printf("[DEBUG] Not attaching any node states: overall state is nil") - return nil - } - - for _, v := range g.Vertices() { - // Nodes implement this interface to request state attachment. 
- an, ok := v.(GraphNodeAttachResourceState) - if !ok { - continue - } - addr := an.ResourceInstanceAddr() - - rs := t.State.Resource(addr.ContainingResource()) - if rs == nil { - log.Printf("[DEBUG] Resource state not found for node %q, instance %s", dag.VertexName(v), addr) - continue - } - - is := rs.Instance(addr.Resource.Key) - if is == nil { - // We don't actually need this here, since we'll attach the whole - // resource state, but we still check because it'd be weird - // for the specific instance we're attaching to not to exist. - log.Printf("[DEBUG] Resource instance state not found for node %q, instance %s", dag.VertexName(v), addr) - continue - } - - // make sure to attach a copy of the state, so that instances don't all - // modify the same ResourceState. - an.AttachResourceState(rs.DeepCopy()) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go deleted file mode 100644 index 95606dd7..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go +++ /dev/null @@ -1,104 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" -) - -// ConfigTransformer is a GraphTransformer that adds all the resources -// from the configuration to the graph. -// -// The module used to configure this transformer must be the root module. -// -// Only resources are added to the graph. Variables, outputs, and -// providers must be added via other transforms. -// -// Unlike ConfigTransformerOld, this transformer creates a graph with -// all resources including module resources, rather than creating module -// nodes that are then "flattened". -type ConfigTransformer struct { - Concrete ConcreteResourceNodeFunc - - // Module is the module to add resources from. - Config *configs.Config - - // Unique will only add resources that aren't already present in the graph. - Unique bool - - // Mode will only add resources that match the given mode - ModeFilter bool - Mode addrs.ResourceMode -} - -func (t *ConfigTransformer) Transform(g *Graph) error { - // If no configuration is available, we don't do anything - if t.Config == nil { - return nil - } - - // Start the transformation process - return t.transform(g, t.Config) -} - -func (t *ConfigTransformer) transform(g *Graph, config *configs.Config) error { - // If no config, do nothing - if config == nil { - return nil - } - - // Add our resources - if err := t.transformSingle(g, config); err != nil { - return err - } - - // Transform all the children.
- for _, c := range config.Children { - if err := t.transform(g, c); err != nil { - return err - } - } - - return nil -} - -func (t *ConfigTransformer) transformSingle(g *Graph, config *configs.Config) error { - path := config.Path - module := config.Module - log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", path) - - allResources := make([]*configs.Resource, 0, len(module.ManagedResources)+len(module.DataResources)) - for _, r := range module.ManagedResources { - allResources = append(allResources, r) - } - for _, r := range module.DataResources { - allResources = append(allResources, r) - } - - for _, r := range allResources { - relAddr := r.Addr() - - if t.ModeFilter && relAddr.Mode != t.Mode { - // Skip non-matching modes - continue - } - - abstract := &NodeAbstractResource{ - Addr: addrs.ConfigResource{ - Resource: relAddr, - Module: path, - }, - } - - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go deleted file mode 100644 index 01601bdd..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go +++ /dev/null @@ -1,33 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" -) - -// CountBoundaryTransformer adds a node that depends on everything else -// so that it runs last in order to clean up the state for nodes that -// are on the "count boundary": "foo.0" when only one exists becomes "foo" -type CountBoundaryTransformer struct { - Config *configs.Config -} - -func (t *CountBoundaryTransformer) Transform(g *Graph) error { - node := &NodeCountBoundary{ - Config: t.Config, - } - g.Add(node) - - // Depends on everything - for _, v := range g.Vertices() { - // Don't connect to ourselves - if v == node { - continue - } - - // Connect! - g.Connect(dag.BasicEdge(node, v)) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go deleted file mode 100644 index 948cf0e6..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go +++ /dev/null @@ -1,169 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" -) - -// GraphNodeDestroyerCBD must be implemented by nodes that might be -// create-before-destroy destroyers, or might plan a create-before-destroy -// action. -type GraphNodeDestroyerCBD interface { - // CreateBeforeDestroy returns true if this node represents a node - // that is doing a CBD. - CreateBeforeDestroy() bool - - // ModifyCreateBeforeDestroy is called when the CBD state of a node - // is changed dynamically. This can return an error if this isn't - // allowed. - ModifyCreateBeforeDestroy(bool) error -} - -// GraphNodeAttachDestroyer is implemented by applyable nodes that have a -// companion destroy node. This allows the creation node to look up the status -// of the destroy node and determine if it needs to depose the existing state, -// or replace it. -// If a node is not marked as create-before-destroy in the configuration, but a -// dependency forces that status, only the destroy node will be aware of that -// status. 
-type GraphNodeAttachDestroyer interface { - // AttachDestroyNode takes a destroy node and saves a reference to that - // node in the receiver, so it can later check the status of - // CreateBeforeDestroy(). - AttachDestroyNode(n GraphNodeDestroyerCBD) -} - -// ForcedCBDTransformer detects when a particular CBD-able graph node has -// dependencies with another that has create_before_destroy set that require -// it to be forced on, and forces it on. -// -// This must be used in the plan graph builder to ensure that -// create_before_destroy settings are properly propagated before constructing -// the planned changes. This requires that the plannable resource nodes -// implement GraphNodeDestroyerCBD. -type ForcedCBDTransformer struct { -} - -func (t *ForcedCBDTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - dn, ok := v.(GraphNodeDestroyerCBD) - if !ok { - continue - } - - if !dn.CreateBeforeDestroy() { - // If there are no CBD descendent (dependent) nodes, then we - // do nothing here. - if !t.hasCBDDescendent(g, v) { - log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) has no CBD descendent, so skipping", dag.VertexName(v), v) - continue - } - - // If this isn't naturally a CBD node, this means that a descendent is, - // and we need to auto-upgrade this node to CBD. We do this because - // a CBD node depending on non-CBD will result in cycles. To avoid this, - // we always attempt to upgrade it. - log.Printf("[TRACE] ForcedCBDTransformer: forcing create_before_destroy on for %q (%T)", dag.VertexName(v), v) - if err := dn.ModifyCreateBeforeDestroy(true); err != nil { - return fmt.Errorf( - "%s: must have create before destroy enabled because "+ - "a dependent resource has CBD enabled. However, when "+ - "attempting to automatically do this, an error occurred: %s", - dag.VertexName(v), err) - } - } else { - log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) already has create_before_destroy set", dag.VertexName(v), v) - } - } - return nil -} - -// hasCBDDescendent returns true if any descendent (node that depends on this) -// has CBD set. -func (t *ForcedCBDTransformer) hasCBDDescendent(g *Graph, v dag.Vertex) bool { - s, _ := g.Descendents(v) - if s == nil { - return true - } - - for _, ov := range s { - dn, ok := ov.(GraphNodeDestroyerCBD) - if !ok { - continue - } - - if dn.CreateBeforeDestroy() { - // some descendent is CreateBeforeDestroy, so we need to follow suit - log.Printf("[TRACE] ForcedCBDTransformer: %q has CBD descendent %q", dag.VertexName(v), dag.VertexName(ov)) - return true - } - } - - return false -} - -// CBDEdgeTransformer modifies the edges of CBD nodes that went through -// the DestroyEdgeTransformer to have the right dependencies. There are -// two real tasks here: -// -// 1. With CBD, the destroy edge is inverted: the destroy depends on -// the creation. -// -// 2. A_d must depend on resources that depend on A. This is to enable -// the destroy to only happen once nodes that depend on A successfully -// update to A. Example: adding a web server updates the load balancer -// before deleting the old web server. -// -// This transformer requires that a previous transformer has already forced -// create_before_destroy on for nodes that are depended on by explicit CBD -// nodes. This is the logic in ForcedCBDTransformer, though in practice we -// will get here by recording the CBD-ness of each change in the plan during -// the plan walk and then forcing the nodes into the appropriate setting during -// DiffTransformer when building the apply graph. -type CBDEdgeTransformer struct { - // Module and State are only needed to look up dependencies in - // any way possible. Either can be nil if not available. - Config *configs.Config - State *states.State - - // If configuration is present then Schemas is required in order to - // obtain schema information from providers and provisioners so we can - // properly resolve implicit dependencies. - Schemas *Schemas -} - -func (t *CBDEdgeTransformer) Transform(g *Graph) error { - // Go through and reverse any destroy edges - for _, v := range g.Vertices() { - dn, ok := v.(GraphNodeDestroyerCBD) - if !ok { - continue - } - if _, ok = v.(GraphNodeDestroyer); !ok { - continue - } - - if !dn.CreateBeforeDestroy() { - continue - } - - // Find the resource edges - for _, e := range g.EdgesTo(v) { - src := e.Source() - - // If source is a create node, invert the edge. - // This covers both the node's own creator, as well as reversing - // any dependants' edges. - if _, ok := src.(GraphNodeCreator); ok { - log.Printf("[TRACE] CBDEdgeTransformer: reversing edge %s -> %s", dag.VertexName(src), dag.VertexName(v)) - g.RemoveEdge(e) - g.Connect(dag.BasicEdge(v, src)) - } - } - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go deleted file mode 100644 index acf804db..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go +++ /dev/null @@ -1,304 +0,0 @@ -package terraform - -import ( - "log" - "sort" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/states" - - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeDestroyer must be implemented by nodes that destroy resources. -type GraphNodeDestroyer interface { - dag.Vertex - - // DestroyAddr is the address of the resource that is being - // destroyed by this node. If this returns nil, then this node - // is not destroying anything. - DestroyAddr() *addrs.AbsResourceInstance -} - -// GraphNodeCreator must be implemented by nodes that create OR update resources. -type GraphNodeCreator interface { - // CreateAddr is the address of the resource being created or updated - CreateAddr() *addrs.AbsResourceInstance -} - -// DestroyEdgeTransformer is a GraphTransformer that creates the proper -// references for destroy resources. Destroy resources are more complex -// in that they must depend on the destruction of resources that -// in turn depend on the CREATION of the node being destroyed. -// -// That is complicated. Visually: -// -// B_d -> A_d -> A -> B -// -// Notice that A destroy depends on B destroy, while B create depends on -// A create. They're inverted. This must be done for example because often -// dependent resources will block parent resources from deleting. Concrete -// example: VPC with subnets, the VPC can't be deleted while there are -// still subnets. -type DestroyEdgeTransformer struct { - // These are needed to properly build the graph of dependencies - // to determine what a destroy node depends on. Any of these can be nil. - Config *configs.Config - State *states.State - - // If configuration is present then Schemas is required in order to - // obtain schema information from providers and provisioners in order - // to properly resolve implicit dependencies. - Schemas *Schemas -} - -func (t *DestroyEdgeTransformer) Transform(g *Graph) error { - // Build a map of what is being destroyed (by address string) to - // the list of destroyers. - destroyers := make(map[string][]GraphNodeDestroyer) - - // Record the creators, which will need to depend on the destroyers if they - // are only being updated. - creators := make(map[string]GraphNodeCreator) - - // destroyersByResource records each destroyer by the AbsResourceAddress. - // We use this because dependencies are only referenced as resources, but we - // will want to connect all the individual instances for correct ordering. - destroyersByResource := make(map[string][]GraphNodeDestroyer) - for _, v := range g.Vertices() { - switch n := v.(type) { - case GraphNodeDestroyer: - addrP := n.DestroyAddr() - if addrP == nil { - log.Printf("[WARN] DestroyEdgeTransformer: %q (%T) has no destroy address", dag.VertexName(n), v) - continue - } - addr := *addrP - - key := addr.String() - log.Printf("[TRACE] DestroyEdgeTransformer: %q (%T) destroys %s", dag.VertexName(n), v, key) - destroyers[key] = append(destroyers[key], n) - - resAddr := addr.Resource.Resource.Absolute(addr.Module).String() - destroyersByResource[resAddr] = append(destroyersByResource[resAddr], n) - case GraphNodeCreator: - addr := n.CreateAddr() - creators[addr.String()] = n - } - } - - // If we aren't destroying anything, there will be no edges to make - // so just exit early and avoid future work. - if len(destroyers) == 0 { - return nil - } - - // Connect destroy dependencies as stored in the state - for _, ds := range destroyers { - for _, des := range ds { - ri, ok := des.(GraphNodeResourceInstance) - if !ok { - continue - } - - for _, resAddr := range ri.StateDependencies() { - for _, desDep := range destroyersByResource[resAddr.String()] { - log.Printf("[TRACE] DestroyEdgeTransformer: %s has stored dependency of %s\n", dag.VertexName(desDep), dag.VertexName(des)) - g.Connect(dag.BasicEdge(desDep, des)) - - } - } - } - } - - // connect creators to any destroyers on which they may depend - for _, c := range creators { - ri, ok := c.(GraphNodeResourceInstance) - if !ok { - continue - } - - for _, resAddr := range ri.StateDependencies() { - for _, desDep := range destroyersByResource[resAddr.String()] { - log.Printf("[TRACE] DestroyEdgeTransformer: %s has stored dependency of %s\n", dag.VertexName(c), dag.VertexName(desDep)) - g.Connect(dag.BasicEdge(c, desDep)) - - } - } - } - - // Go through and connect creators to destroyers. Going along with - // our example, this makes: A_d => A - for _, v := range g.Vertices() { - cn, ok := v.(GraphNodeCreator) - if !ok { - continue - } - - addr := cn.CreateAddr() - if addr == nil { - continue - } - - for _, d := range destroyers[addr.String()] { - // For illustrating our example - a_d := d.(dag.Vertex) - a := v - - log.Printf( - "[TRACE] DestroyEdgeTransformer: connecting creator %q with destroyer %q", - dag.VertexName(a), dag.VertexName(a_d)) - - g.Connect(dag.BasicEdge(a, a_d)) - - // Attach the destroy node to the creator - // There really shouldn't be more than one destroyer, but even if - // there are, any of them will represent the correct - // CreateBeforeDestroy status. - if n, ok := cn.(GraphNodeAttachDestroyer); ok { - if d, ok := d.(GraphNodeDestroyerCBD); ok { - n.AttachDestroyNode(d) - } - } - } - } - - return nil -} - -// Remove any nodes that aren't needed when destroying modules. -// Variables, outputs, locals, and expanders may not be able to evaluate -// correctly, so we can remove these if nothing depends on them. The module -// closers also need to disable their use of expansion if the module itself is -// no longer present. -type pruneUnusedNodesTransformer struct { -} - -func (t *pruneUnusedNodesTransformer) Transform(g *Graph) error { - // We need a reverse depth first walk of modules, processing them in order - // from the leaf modules to the root. This allows us to remove unneeded - // dependencies from child modules, freeing up nodes in the parent module - // to also be removed. - - // First collect the nodes into their respective modules based on - // configuration path. - moduleMap := make(map[string]pruneUnusedNodesMod) - for _, v := range g.Vertices() { - var path addrs.Module - switch v := v.(type) { - case GraphNodeModulePath: - path = v.ModulePath() - default: - continue - } - m := moduleMap[path.String()] - m.addr = path - m.nodes = append(m.nodes, v) - - moduleMap[path.String()] = m - } - - // now we need to restructure the modules so we can sort them - var modules []pruneUnusedNodesMod - - for _, mod := range moduleMap { - modules = append(modules, mod) - } - - // Sort them by path length, longest first, so that we start with the deepest - // modules. The order of modules at the same tree level doesn't matter; we - // just need to ensure that child modules are processed before parent - // modules. - sort.Slice(modules, func(i, j int) bool { - return len(modules[i].addr) > len(modules[j].addr) - }) - - for _, mod := range modules { - mod.removeUnused(g) - } - - return nil -} - -// pruneUnusedNodesMod is a container to hold the nodes that belong to a -// particular configuration module for the pruneUnusedNodesTransformer -type pruneUnusedNodesMod struct { - addr addrs.Module - nodes []dag.Vertex -} - -// Remove any unused locals, variables, outputs and expanders. Since module -// closers can also lookup expansion info to detect orphaned instances, disable -// them if their associated expander is removed. -func (m *pruneUnusedNodesMod) removeUnused(g *Graph) { - // We modify the nodes slice during processing here. - // Make a copy so no one is surprised by this changing in the future. - nodes := make([]dag.Vertex, len(m.nodes)) - copy(nodes, m.nodes) - - // since we have no defined structure within the module, just cycle through - // the nodes in each module until there are no more removals - removed := true - for { - if !removed { - return - } - removed = false - - for i := 0; i < len(nodes); i++ { - // run this in a closure, so we can return early rather than - // dealing with complex looping and labels - func() { - n := nodes[i] - switch n.(type) { - case graphNodeTemporaryValue: - // temporary values, which consist of variables, locals, and - // outputs, must be kept if anything refers to them. - if n, ok := n.(GraphNodeModulePath); ok { - // root outputs always have an implicit dependency on - // remote state. - if n.ModulePath().IsRoot() { - return - } - } - for _, v := range g.UpEdges(n) { - // keep any value which is connected through a - // reference - if _, ok := v.(GraphNodeReferencer); ok { - return - } - } - - case graphNodeExpandsInstances: - // Any nodes that expand instances are kept when their - // instances may need to be evaluated. - for _, v := range g.UpEdges(n) { - switch v.(type) { - case graphNodeExpandsInstances: - // expanders can always depend on module expansion - // themselves - return - case GraphNodeResourceInstance: - // resource instances always depend on their - // resource node, which is an expander - return - } - } - - default: - return - } - - log.Printf("[DEBUG] pruneUnusedNodes: %s is no longer needed, removing", dag.VertexName(n)) - g.Remove(n) - removed = true - - // remove the node from our iteration as well - last := len(nodes) - 1 - nodes[i], nodes[last] = nodes[last], nodes[i] - nodes = nodes[:last] - }() - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go deleted file mode 100644 index bed71a0e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go +++ /dev/null @@ -1,183 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// DiffTransformer is a GraphTransformer that adds graph nodes representing -// each of the resource changes described in the given Changes object. -type DiffTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - State *states.State - Changes *plans.Changes -} - -func (t *DiffTransformer) Transform(g *Graph) error { - if t.Changes == nil || len(t.Changes.Resources) == 0 { - // Nothing to do! - return nil - } - - // Go through all the modules in the diff. - log.Printf("[TRACE] DiffTransformer starting") - - var diags tfdiags.Diagnostics - state := t.State - changes := t.Changes - - // DiffTransformer creates resource _instance_ nodes. If there are any - // whole-resource nodes already in the graph, we must ensure that they - // get evaluated before any of the corresponding instances by creating - // dependency edges, so we'll do some prep work here to ensure we'll only - // create connections to nodes that existed before we started here. - resourceNodes := map[string][]GraphNodeConfigResource{} - for _, node := range g.Vertices() { - rn, ok := node.(GraphNodeConfigResource) - if !ok { - continue - } - // We ignore any instances that _also_ implement - // GraphNodeResourceInstance, since in the unlikely event that they - // do exist we'd probably end up creating cycles by connecting them. - if _, ok := node.(GraphNodeResourceInstance); ok { - continue - } - - addr := rn.ResourceAddr().String() - resourceNodes[addr] = append(resourceNodes[addr], rn) - } - - for _, rc := range changes.Resources { - addr := rc.Addr - dk := rc.DeposedKey - - log.Printf("[TRACE] DiffTransformer: found %s change for %s %s", rc.Action, addr, dk) - - // Depending on the action we'll need some different combinations of - // nodes, because destroying uses a special node type separate from - // other actions.
- var update, delete, createBeforeDestroy bool - switch rc.Action { - case plans.NoOp: - continue - case plans.Delete: - delete = true - case plans.DeleteThenCreate, plans.CreateThenDelete: - update = true - delete = true - createBeforeDestroy = (rc.Action == plans.CreateThenDelete) - default: - update = true - } - - if dk != states.NotDeposed && update { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid planned change for deposed object", - fmt.Sprintf("The plan contains a non-delete change for %s deposed object %s. The only valid action for a deposed object is to destroy it, so this is a bug in Terraform.", addr, dk), - )) - continue - } - - // If we're going to do a create_before_destroy Replace operation then - // we need to allocate a DeposedKey to use to retain the - // not-yet-destroyed prior object, so that the delete node can destroy - // _that_ rather than the newly-created node, which will be current - // by the time the delete node is visited. - if update && delete && createBeforeDestroy { - // In this case, variable dk will be the _pre-assigned_ DeposedKey - // that must be used if the update graph node deposes the current - // instance, which will then align with the same key we pass - // into the destroy node to ensure we destroy exactly the deposed - // object we expect. - if state != nil { - ris := state.ResourceInstance(addr) - if ris == nil { - // Should never happen, since we don't plan to replace an - // instance that doesn't exist yet. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid planned change", - fmt.Sprintf("The plan contains a replace change for %s, which doesn't exist yet. This is a bug in Terraform.", addr), - )) - continue - } - - // Allocating a deposed key separately from using it can be racy - // in general, but we assume here that nothing except the apply - // node we instantiate below will actually make new deposed objects - // in practice, and so the set of already-used keys will not change - // between now and then. - dk = ris.FindUnusedDeposedKey() - } else { - // If we have no state at all yet then we can use _any_ - // DeposedKey. - dk = states.NewDeposedKey() - } - } - - if update { - // All actions except destroy use the node type chosen by t.Concrete - abstract := NewNodeAbstractResourceInstance(addr) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - if createBeforeDestroy { - // We'll attach our pre-allocated DeposedKey to the node if - // it supports that. NodeApplyableResourceInstance is the - // specific concrete node type we are looking for here really, - // since that's the only node type that might depose objects. - if dn, ok := node.(GraphNodeDeposer); ok { - dn.SetPreallocatedDeposedKey(dk) - } - log.Printf("[TRACE] DiffTransformer: %s will be represented by %s, deposing prior object to %s", addr, dag.VertexName(node), dk) - } else { - log.Printf("[TRACE] DiffTransformer: %s will be represented by %s", addr, dag.VertexName(node)) - } - - g.Add(node) - rsrcAddr := addr.ContainingResource().String() - for _, rsrcNode := range resourceNodes[rsrcAddr] { - g.Connect(dag.BasicEdge(node, rsrcNode)) - } - } - - if delete { - // Destroying always uses a destroy-specific node type, though - // which one depends on whether we're destroying a current object - // or a deposed object.
- var node GraphNodeResourceInstance - abstract := NewNodeAbstractResourceInstance(addr) - if dk == states.NotDeposed { - node = &NodeDestroyResourceInstance{ - NodeAbstractResourceInstance: abstract, - DeposedKey: dk, - } - } else { - node = &NodeDestroyDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: abstract, - DeposedKey: dk, - } - } - if dk == states.NotDeposed { - log.Printf("[TRACE] DiffTransformer: %s will be represented for destruction by %s", addr, dag.VertexName(node)) - } else { - log.Printf("[TRACE] DiffTransformer: %s deposed object %s will be represented for destruction by %s", addr, dk, dag.VertexName(node)) - } - g.Add(node) - } - - } - - log.Printf("[TRACE] DiffTransformer complete") - - return diags.Err() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go deleted file mode 100644 index dca71b63..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -// GraphNodeDynamicExpandable is an interface that nodes can implement -// to signal that they can be expanded at eval-time (hence dynamic). -// These nodes are given the eval context and are expected to return -// a new subgraph. -type GraphNodeDynamicExpandable interface { - DynamicExpand(EvalContext) (*Graph, error) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go deleted file mode 100644 index c801e5c8..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/tfdiags" -) - -// ImportProviderValidateTransformer is a GraphTransformer that goes through -// the providers in the graph and validates that they only depend on variables. -type ImportProviderValidateTransformer struct{} - -func (t *ImportProviderValidateTransformer) Transform(g *Graph) error { - var diags tfdiags.Diagnostics - - for _, v := range g.Vertices() { - // We only care about providers - pv, ok := v.(GraphNodeProvider) - if !ok { - continue - } - - // We only care about providers that reference things - rn, ok := pv.(GraphNodeReferencer) - if !ok { - continue - } - - for _, ref := range rn.References() { - if _, ok := ref.Subject.(addrs.InputVariable); !ok { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider dependency for import", - Detail: fmt.Sprintf("The configuration for %s depends on %s. 
Providers used with import must either have literal configuration or refer only to input variables.", pv.ProviderAddr(), ref.Subject.String()), - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - } - } - } - - return diags.Err() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go deleted file mode 100644 index 682161fb..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go +++ /dev/null @@ -1,283 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/tfdiags" -) - -// ImportStateTransformer is a GraphTransformer that adds nodes to the -// graph to represent the imports we want to do for resources. -type ImportStateTransformer struct { - Targets []*ImportTarget - Config *configs.Config -} - -func (t *ImportStateTransformer) Transform(g *Graph) error { - for _, target := range t.Targets { - - // This is only likely to happen in misconfigured tests - if t.Config == nil { - return fmt.Errorf("Cannot import into an empty configuration.") - } - - // Get the module config - modCfg := t.Config.Descendent(target.Addr.Module.Module()) - if modCfg == nil { - return fmt.Errorf("Module %s not found.", target.Addr.Module.Module()) - } - - // Get the resource config - rsCfg := modCfg.Module.ResourceByAddr(target.Addr.Resource.Resource) - if rsCfg == nil { - return fmt.Errorf("Resource %s not found in the configuration.", target.Addr) - } - - // Get the provider FQN for the resource from the resource configuration - providerFqn := rsCfg.Provider - - // Get the provider local config for the resource - localpCfg := rsCfg.ProviderConfigAddr() - - providerAddr := addrs.AbsProviderConfig{ - Provider: providerFqn, - Alias: localpCfg.Alias, - Module: target.Addr.Module.Module(), - } - - node := &graphNodeImportState{ - Addr: target.Addr, - ID: target.ID, - ProviderAddr: providerAddr, - } - g.Add(node) - } - return nil -} - -type graphNodeImportState struct { - Addr addrs.AbsResourceInstance // Addr is the resource address to import into - ID string // ID is the ID to import as - ProviderAddr addrs.AbsProviderConfig // Provider address given by the user, or implied by the resource type - ResolvedProvider addrs.AbsProviderConfig // provider node address after resolution - - states []providers.ImportedResource -} - -var ( - _ GraphNodeModulePath = (*graphNodeImportState)(nil) - _ GraphNodeEvalable = (*graphNodeImportState)(nil) - _ GraphNodeProviderConsumer = (*graphNodeImportState)(nil) - _ GraphNodeDynamicExpandable = (*graphNodeImportState)(nil) -) - -func (n *graphNodeImportState) Name() string { - return fmt.Sprintf("%s (import id %q)", n.Addr, n.ID) -} - -// GraphNodeProviderConsumer -func (n *graphNodeImportState) ProvidedBy() (addrs.ProviderConfig, bool) { - // We assume that n.ProviderAddr has been properly populated here. - // It's the responsibility of the code creating a graphNodeImportState - // to populate this, possibly by calling DefaultProviderConfig() on the - // resource address to infer an implied provider from the resource type - // name.
- return n.ProviderAddr, false -} - -// GraphNodeProviderConsumer -func (n *graphNodeImportState) Provider() addrs.Provider { - // We assume that n.ProviderAddr has been properly populated here. - // It's the responsibility of the code creating a graphNodeImportState - // to populate this, possibly by calling DefaultProviderConfig() on the - // resource address to infer an implied provider from the resource type - // name. - return n.ProviderAddr.Provider -} - -// GraphNodeProviderConsumer -func (n *graphNodeImportState) SetProvider(addr addrs.AbsProviderConfig) { - n.ResolvedProvider = addr -} - -// GraphNodeModuleInstance -func (n *graphNodeImportState) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeModulePath -func (n *graphNodeImportState) ModulePath() addrs.Module { - return n.Addr.Module.Module() -} - -// GraphNodeEvalable impl. -func (n *graphNodeImportState) EvalTree() EvalNode { - var provider providers.Interface - - // Reset our states - n.states = nil - - // Return our sequence - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - }, - &EvalImportState{ - Addr: n.Addr.Resource, - Provider: &provider, - ID: n.ID, - Output: &n.states, - }, - }, - } -} - -// GraphNodeDynamicExpandable impl. -// -// We use DynamicExpand as a way to generate the subgraph of refreshes -// and state inserts we need to do for our import state. Since they're new -// resources they don't depend on anything else and refreshes are isolated -// so this is nearly a perfect use case for dynamic expand. -func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - g := &Graph{Path: ctx.Path()} - - // nameCounter is used to de-dup names in the state. - nameCounter := make(map[string]int) - - // Compile the list of addresses that we'll be inserting into the state. - // We do this ahead of time so we can verify that we aren't importing - // something that already exists. - addrs := make([]addrs.AbsResourceInstance, len(n.states)) - for i, state := range n.states { - addr := n.Addr - if t := state.TypeName; t != "" { - addr.Resource.Resource.Type = t - } - - // Determine if we need to suffix the name to de-dup - key := addr.String() - count, ok := nameCounter[key] - if ok { - count++ - addr.Resource.Resource.Name += fmt.Sprintf("-%d", count) - } - nameCounter[key] = count - - // Add it to our list - addrs[i] = addr - } - - // Verify that all the addresses are clear - state := ctx.State() - for _, addr := range addrs { - existing := state.ResourceInstance(addr) - if existing != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Resource already managed by Terraform", - fmt.Sprintf("Terraform is already managing a remote object for %s. To import to this address you must first remove the existing object from the state.", addr), - )) - continue - } - } - if diags.HasErrors() { - // Bail out early, then. - return nil, diags.Err() - } - - // For each of the states, we add a node to handle the refresh/add to state. - // "n.states" is populated by our own EvalTree with the result of - // ImportState. Since DynamicExpand is always called after EvalTree, this - // is safe. - for i, state := range n.states { - g.Add(&graphNodeImportStateSub{ - TargetAddr: addrs[i], - State: state, - ResolvedProvider: n.ResolvedProvider, - }) - } - - // Root transform for a single root - t := &RootTransformer{} - if err := t.Transform(g); err != nil { - return nil, err - } - - // Done! 
- return g, diags.Err() -} - -// graphNodeImportStateSub is the sub-node of graphNodeImportState -// and is part of the subgraph. This node is responsible for refreshing -// and adding a resource to the state once it is imported. -type graphNodeImportStateSub struct { - TargetAddr addrs.AbsResourceInstance - State providers.ImportedResource - ResolvedProvider addrs.AbsProviderConfig -} - -var ( - _ GraphNodeModuleInstance = (*graphNodeImportStateSub)(nil) - _ GraphNodeEvalable = (*graphNodeImportStateSub)(nil) -) - -func (n *graphNodeImportStateSub) Name() string { - return fmt.Sprintf("import %s result", n.TargetAddr) -} - -func (n *graphNodeImportStateSub) Path() addrs.ModuleInstance { - return n.TargetAddr.Module -} - -// GraphNodeEvalable impl. -func (n *graphNodeImportStateSub) EvalTree() EvalNode { - // If the Ephemeral type isn't set, then it is an error - if n.State.TypeName == "" { - err := fmt.Errorf("import of %s didn't set type", n.TargetAddr.String()) - return &EvalReturnError{Error: &err} - } - - state := n.State.AsInstanceObject() - - var provider providers.Interface - var providerSchema *ProviderSchema - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalRefresh{ - Addr: n.TargetAddr.Resource, - ProviderAddr: n.ResolvedProvider, - Provider: &provider, - ProviderSchema: &providerSchema, - State: &state, - Output: &state, - }, - &EvalImportStateVerify{ - Addr: n.TargetAddr.Resource, - State: &state, - }, - &EvalWriteState{ - Addr: n.TargetAddr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_local.go b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go deleted file mode 100644 index d5b97e14..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_local.go +++ /dev/null @@ -1,42 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" -) - -// LocalTransformer is a GraphTransformer that adds all the local values -// from the configuration to the graph. 
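Both the import-state transformer above and the LocalTransformer below implement the same one-method contract. As a minimal standalone sketch of that pattern, with toy stand-in Graph and transformer types (the names here are illustrative, not the vendored APIs):

package main

import "fmt"

// Graph is a toy stand-in for the dag-based Graph used above.
type Graph struct {
	vertices []string
}

func (g *Graph) Add(v string) { g.vertices = append(g.vertices, v) }

// GraphTransformer mirrors the contract every transformer in these
// files implements: mutate the graph in place and report any error.
type GraphTransformer interface {
	Transform(g *Graph) error
}

// localValueTransformer adds one vertex per configured local value.
type localValueTransformer struct {
	locals []string
}

func (t *localValueTransformer) Transform(g *Graph) error {
	for _, name := range t.locals {
		g.Add("local." + name)
	}
	return nil
}

func main() {
	g := &Graph{}
	transformers := []GraphTransformer{
		&localValueTransformer{locals: []string{"region", "tags"}},
	}
	for _, t := range transformers {
		if err := t.Transform(g); err != nil {
			panic(err)
		}
	}
	fmt.Println(g.vertices) // [local.region local.tags]
}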
-type LocalTransformer struct { - Config *configs.Config -} - -func (t *LocalTransformer) Transform(g *Graph) error { - return t.transformModule(g, t.Config) -} - -func (t *LocalTransformer) transformModule(g *Graph, c *configs.Config) error { - if c == nil { - // Can't have any locals if there's no config - return nil - } - - for _, local := range c.Module.Locals { - addr := addrs.LocalValue{Name: local.Name} - node := &nodeExpandLocal{ - Addr: addr, - Module: c.Path, - Config: local, - } - g.Add(node) - } - - // Also populate locals for child modules - for _, cc := range c.Children { - if err := t.transformModule(g, cc); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_expansion.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_expansion.go deleted file mode 100644 index 75275520..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_module_expansion.go +++ /dev/null @@ -1,141 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" -) - -// ModuleExpansionTransformer is a GraphTransformer that adds graph nodes -// representing the possible expansion of each module call in the configuration, -// and ensures that any nodes representing objects declared within a module -// are dependent on the expansion node so that they will be visited only -// after the module expansion has been decided. -// -// This transform must be applied only after all nodes representing objects -// that can be contained within modules have already been added. -type ModuleExpansionTransformer struct { - Config *configs.Config - - // Concrete allows injection of a wrapped module node by the graph builder - // to alter the evaluation behavior. - Concrete ConcreteModuleNodeFunc - - closers map[string]*nodeCloseModule -} - -func (t *ModuleExpansionTransformer) Transform(g *Graph) error { - t.closers = make(map[string]*nodeCloseModule) - // The root module is always a singleton and so does not need expansion - // processing, but any descendent modules do. We'll process them - // recursively using t.transform. - for _, cfg := range t.Config.Children { - err := t.transform(g, cfg, nil) - if err != nil { - return err - } - } - - // Now go through and connect all nodes to their respective module closers. - // This is done all at once here, because orphaned modules were already - // handled by the RemovedModuleTransformer, and those module closers are in - // the graph already, and need to be connected to their parent closers. - for _, v := range g.Vertices() { - // skip closers so they don't attach to themselves - if _, ok := v.(*nodeCloseModule); ok { - continue - } - - // any node that executes within the scope of a module should be a - // GraphNodeModulePath - pather, ok := v.(GraphNodeModulePath) - if !ok { - continue - } - if closer, ok := t.closers[pather.ModulePath().String()]; ok { - // The module closer depends on each child resource instance, since - // during apply the module expansion will complete before the - // individual instances are applied. - g.Connect(dag.BasicEdge(closer, v)) - } - } - - // Modules implicitly depend on their child modules, so connect closers to - // other which contain their path. 
- for _, c := range t.closers { - for _, d := range t.closers { - if len(d.Addr) > len(c.Addr) && c.Addr.Equal(d.Addr[:len(c.Addr)]) { - g.Connect(dag.BasicEdge(c, d)) - } - } - } - - return nil -} - -func (t *ModuleExpansionTransformer) transform(g *Graph, c *configs.Config, parentNode dag.Vertex) error { - _, call := c.Path.Call() - modCall := c.Parent.Module.ModuleCalls[call.Name] - - n := &nodeExpandModule{ - Addr: c.Path, - Config: c.Module, - ModuleCall: modCall, - } - var v dag.Vertex = n - if t.Concrete != nil { - v = t.Concrete(n) - } - - g.Add(v) - log.Printf("[TRACE] ModuleExpansionTransformer: Added %s as %T", c.Path, v) - - if parentNode != nil { - log.Printf("[TRACE] ModuleExpansionTransformer: %s must wait for expansion of %s", dag.VertexName(v), dag.VertexName(parentNode)) - g.Connect(dag.BasicEdge(v, parentNode)) - } - - // Add the closer (which acts as the root module node) to provide a - // single exit point for the expanded module. - closer := &nodeCloseModule{ - Addr: c.Path, - } - g.Add(closer) - g.Connect(dag.BasicEdge(closer, v)) - t.closers[c.Path.String()] = closer - - for _, childV := range g.Vertices() { - // don't connect a node to itself - if childV == v { - continue - } - - var path addrs.Module - switch t := childV.(type) { - case GraphNodeDestroyer: - // skip destroyers, as they can only depend on other resources. - continue - - case GraphNodeModulePath: - path = t.ModulePath() - case GraphNodeReferenceOutside: - path, _ = t.ReferenceOutside() - default: - continue - } - - if path.Equal(c.Path) { - log.Printf("[TRACE] ModuleExpansionTransformer: %s must wait for expansion of %s", dag.VertexName(childV), c.Path) - g.Connect(dag.BasicEdge(childV, v)) - } - } - - // Also visit child modules, recursively. - for _, cc := range c.Children { - return t.transform(g, cc, v) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go deleted file mode 100644 index 99a86c01..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go +++ /dev/null @@ -1,120 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/configs" -) - -// ModuleVariableTransformer is a GraphTransformer that adds all the variables -// in the configuration to the graph. -// -// Any "variable" block present in any non-root module is included here, even -// if a particular variable is not referenced from anywhere. -// -// The transform will produce errors if a call to a module does not conform -// to the expected set of arguments, but this transformer is not in a good -// position to return errors and so the validate walk should include specific -// steps for validating module blocks, separate from this transform. -type ModuleVariableTransformer struct { - Config *configs.Config -} - -func (t *ModuleVariableTransformer) Transform(g *Graph) error { - return t.transform(g, nil, t.Config) -} - -func (t *ModuleVariableTransformer) transform(g *Graph, parent, c *configs.Config) error { - // We can have no variables if we have no configuration. - if c == nil { - return nil - } - - // Transform all the children first. 
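That child-first recursion is an ordinary depth-first walk over the config tree. A small self-contained sketch, using a hypothetical Config type with only a Path and Children:

package main

import "fmt"

// Config is a toy stand-in for configs.Config: a tree of modules.
type Config struct {
	Path     string
	Children map[string]*Config
}

// walk visits parent/child pairs depth-first, children before any
// parent-dependent work, mirroring "transform all the children first".
func walk(parent, c *Config, visit func(parent, c *Config) error) error {
	if c == nil {
		return nil
	}
	for _, cc := range c.Children {
		if err := walk(c, cc, visit); err != nil {
			return err
		}
	}
	return visit(parent, c)
}

func main() {
	root := &Config{
		Children: map[string]*Config{
			"a": {Path: "module.a", Children: map[string]*Config{
				"b": {Path: "module.a.module.b"},
			}},
		},
	}
	_ = walk(nil, root, func(parent, c *Config) error {
		if parent != nil { // root-module variables are handled elsewhere
			fmt.Printf("visit %s\n", c.Path)
		}
		return nil
	})
}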
- for _, cc := range c.Children { - if err := t.transform(g, c, cc); err != nil { - return err - } - } - - // If we're processing anything other than the root module then we'll - // add graph nodes for variables defined inside. (Variables for the root - // module are dealt with in RootVariableTransformer). - // If we have a parent, we can determine if a module variable is being - // used, so we transform this. - if parent != nil { - if err := t.transformSingle(g, parent, c); err != nil { - return err - } - } - - return nil -} - -func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, c *configs.Config) error { - _, call := c.Path.Call() - - // Find the call in the parent module configuration, so we can get the - // expressions given for each input variable at the call site. - callConfig, exists := parent.Module.ModuleCalls[call.Name] - if !exists { - // This should never happen, since it indicates an improperly-constructed - // configuration tree. - panic(fmt.Errorf("no module call block found for %s", c.Path)) - } - - // We need to construct a schema for the expected call arguments based on - // the configured variables in our config, which we can then use to - // decode the content of the call block. - schema := &hcl.BodySchema{} - for _, v := range c.Module.Variables { - schema.Attributes = append(schema.Attributes, hcl.AttributeSchema{ - Name: v.Name, - Required: v.Default == cty.NilVal, - }) - } - - content, contentDiags := callConfig.Config.Content(schema) - if contentDiags.HasErrors() { - // Validation code elsewhere should deal with any errors before we - // get in here, but we'll report them out here just in case, to - // avoid crashes. - var diags tfdiags.Diagnostics - diags = diags.Append(contentDiags) - return diags.Err() - } - - for _, v := range c.Module.Variables { - var expr hcl.Expression - if attr := content.Attributes[v.Name]; attr != nil { - expr = attr.Expr - } else { - // No expression provided for this variable, so we'll make a - // synthetic one using the variable's default value. - expr = &hclsyntax.LiteralValueExpr{ - Val: v.Default, - SrcRange: v.DeclRange, // This is not exact, but close enough - } - } - - // Add a plannable node, as the variable may expand - // during module expansion - node := &nodeExpandModuleVariable{ - Addr: addrs.InputVariable{ - Name: v.Name, - }, - Module: c.Path, - Config: v, - Expr: expr, - } - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go deleted file mode 100644 index 0e2ead6f..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go +++ /dev/null @@ -1,55 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" -) - -// OrphanResourceInstanceCountTransformer is a GraphTransformer that adds orphans -// for an expanded count to the graph. The determination of this depends -// on the count argument given. -// -// Orphans are found by comparing the count to what is found in the state. -// This transform assumes that if an element in the state is within the count -// bounds given, that it is not an orphan. 
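In other words, any instance key found in state but absent from the expected addresses is an orphan. A standalone sketch of that comparison, including the labeled continue the loop below relies on (integer keys stand in for instance addresses):

package main

import "fmt"

func main() {
	// Keys currently recorded in state for one resource.
	stateKeys := []int{0, 1, 2, 3}
	// Keys the configuration still wants, e.g. count = 2.
	wantKeys := []int{0, 1}

	var orphans []int
Have:
	for _, have := range stateKeys {
		for _, want := range wantKeys {
			if have == want {
				continue Have
			}
		}
		// Not wanted by the config: this instance is an orphan.
		orphans = append(orphans, have)
	}
	fmt.Println(orphans) // [2 3]
}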
-type OrphanResourceInstanceCountTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - - Addr addrs.AbsResource // Addr of the resource to look for orphans - InstanceAddrs []addrs.AbsResourceInstance // Addresses that currently exist in config - State *states.State // Full global state -} - -func (t *OrphanResourceInstanceCountTransformer) Transform(g *Graph) error { - rs := t.State.Resource(t.Addr) - if rs == nil { - return nil // Resource doesn't exist in state, so nothing to do! - } - - // This is an O(n*m) analysis, which we accept for now because the - // number of instances of a single resource ought to always be small in any - // reasonable Terraform configuration. -Have: - for key := range rs.Instances { - thisAddr := rs.Addr.Instance(key) - for _, wantAddr := range t.InstanceAddrs { - if wantAddr.Equal(thisAddr) { - continue Have - } - } - // If thisAddr is not in t.InstanceAddrs then we've found an "orphan" - - abstract := NewNodeAbstractResourceInstance(thisAddr) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceInstanceCountTransformer: adding %s as %T", thisAddr, node) - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go deleted file mode 100644 index ba1bce93..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go +++ /dev/null @@ -1,60 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/states" -) - -// OrphanOutputTransformer finds the outputs that aren't present -// in the given config that are in the state and adds them to the graph -// for deletion. -type OrphanOutputTransformer struct { - Config *configs.Config // Root of config tree - State *states.State // State is the root state -} - -func (t *OrphanOutputTransformer) Transform(g *Graph) error { - if t.State == nil { - log.Printf("[DEBUG] No state, no orphan outputs") - return nil - } - - for _, ms := range t.State.Modules { - if err := t.transform(g, ms); err != nil { - return err - } - } - return nil -} - -func (t *OrphanOutputTransformer) transform(g *Graph, ms *states.Module) error { - if ms == nil { - return nil - } - - moduleAddr := ms.Addr - - // Get the config for this path, which is nil if the entire module has been - // removed. - var outputs map[string]*configs.Output - if c := t.Config.DescendentForInstance(moduleAddr); c != nil { - outputs = c.Module.Outputs - } - - // An output is "orphaned" if it's present in the state but not declared - // in the configuration. 
- for name := range ms.OutputValues { - if _, exists := outputs[name]; exists { - continue - } - - g.Add(&NodeDestroyableOutput{ - Addr: addrs.OutputValue{Name: name}.Absolute(moduleAddr), - }) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go deleted file mode 100644 index 6098d898..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go +++ /dev/null @@ -1,95 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" -) - -// OrphanResourceInstanceTransformer is a GraphTransformer that adds orphaned -// resource instances to the graph. An "orphan" is an instance that is present -// in the state but belongs to a resource that is no longer present in the -// configuration. -// -// This is not the transformer that deals with "count orphans" (instances that -// are no longer covered by a resource's "count" or "for_each" setting); that's -// handled instead by OrphanResourceCountTransformer. -type OrphanResourceInstanceTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - - // State is the global state. We require the global state to - // properly find module orphans at our path. - State *states.State - - // Config is the root node in the configuration tree. We'll look up - // the appropriate note in this tree using the path in each node. - Config *configs.Config -} - -func (t *OrphanResourceInstanceTransformer) Transform(g *Graph) error { - if t.State == nil { - // If the entire state is nil, there can't be any orphans - return nil - } - if t.Config == nil { - // Should never happen: we can't be doing any Terraform operations - // without at least an empty configuration. - panic("OrphanResourceInstanceTransformer used without setting Config") - } - - // Go through the modules and for each module transform in order - // to add the orphan. - for _, ms := range t.State.Modules { - if err := t.transform(g, ms); err != nil { - return err - } - } - - return nil -} - -func (t *OrphanResourceInstanceTransformer) transform(g *Graph, ms *states.Module) error { - if ms == nil { - return nil - } - - moduleAddr := ms.Addr - - // Get the configuration for this module. The configuration might be - // nil if the module was removed from the configuration. This is okay, - // this just means that every resource is an orphan. - var m *configs.Module - if c := t.Config.DescendentForInstance(moduleAddr); c != nil { - m = c.Module - } - - // An "orphan" is a resource that is in the state but not the configuration, - // so we'll walk the state resources and try to correlate each of them - // with a configuration block. Each orphan gets a node in the graph whose - // type is decided by t.Concrete. - // - // We don't handle orphans related to changes in the "count" and "for_each" - // pseudo-arguments here. They are handled by OrphanResourceCountTransformer. 
- for _, rs := range ms.Resources { - if m != nil { - if r := m.ResourceByAddr(rs.Addr.Resource); r != nil { - continue - } - } - - for key := range rs.Instances { - addr := rs.Addr.Instance(key) - abstract := NewNodeAbstractResourceInstance(addr) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceInstanceTransformer: adding single-instance orphan node for %s", addr) - g.Add(node) - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go deleted file mode 100644 index 4d51dabd..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go +++ /dev/null @@ -1,103 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" -) - -// OutputTransformer is a GraphTransformer that adds all the outputs -// in the configuration to the graph. -// -// This is done for the apply graph builder even if dependent nodes -// aren't changing since there is no downside: the state will be available -// even if the dependent items aren't changing. -type OutputTransformer struct { - Config *configs.Config -} - -func (t *OutputTransformer) Transform(g *Graph) error { - return t.transform(g, t.Config) -} - -func (t *OutputTransformer) transform(g *Graph, c *configs.Config) error { - // If we have no config then there can be no outputs. - if c == nil { - return nil - } - - // Transform all the children. We must do this first because - // we can reference module outputs and they must show up in the - // reference map. - for _, cc := range c.Children { - if err := t.transform(g, cc); err != nil { - return err - } - } - - // Add plannable outputs to the graph, which will be dynamically expanded - // into NodeApplyableOutputs to reflect possible expansion - // through the presence of "count" or "for_each" on the modules. - for _, o := range c.Module.Outputs { - node := &nodeExpandOutput{ - Addr: addrs.OutputValue{Name: o.Name}, - Module: c.Path, - Config: o, - } - log.Printf("[TRACE] OutputTransformer: adding %s as %T", o.Name, node) - g.Add(node) - } - - return nil -} - -// destroyRootOutputTransformer is a GraphTransformer that adds nodes to delete -// outputs during destroy. We need to do this to ensure that no stale outputs -// are ever left in the state. 
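Ordering that cleanup correctly means the destroy node must run after everything the output's own node depends on. A toy sketch of collecting those descendants from an adjacency-map DAG (the resource names are invented for illustration):

package main

import "fmt"

// descendents returns every vertex reachable from start in a toy DAG
// represented as an adjacency map whose edges point at dependencies.
func descendents(g map[string][]string, start string) []string {
	seen := map[string]bool{}
	var out []string
	var dfs func(v string)
	dfs = func(v string) {
		for _, d := range g[v] {
			if !seen[d] {
				seen[d] = true
				out = append(out, d)
				dfs(d)
			}
		}
	}
	dfs(start)
	return out
}

func main() {
	g := map[string][]string{
		"output.ip":        {"aws_instance.web"},
		"aws_instance.web": {"provider.aws"},
	}
	// The destroy node for output.ip must run after everything the
	// eval node depends on, plus the eval node itself.
	deps := append(descendents(g, "output.ip"), "output.ip")
	for _, d := range deps {
		fmt.Printf("destroy(output.ip) -> %s\n", d)
	}
}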
-type destroyRootOutputTransformer struct { - Destroy bool -} - -func (t *destroyRootOutputTransformer) Transform(g *Graph) error { - // Only clean root outputs on a full destroy - if !t.Destroy { - return nil - } - - for _, v := range g.Vertices() { - output, ok := v.(*nodeExpandOutput) - if !ok { - continue - } - - // We only destroy root outputs - if !output.Module.Equal(addrs.RootModule) { - continue - } - - // create the destroy node for this output - node := &NodeDestroyableOutput{ - Addr: output.Addr.Absolute(addrs.RootModuleInstance), - Config: output.Config, - } - - log.Printf("[TRACE] creating %s", node.Name()) - g.Add(node) - - deps, err := g.Descendents(v) - if err != nil { - return err - } - - // the destroy node must depend on the eval node - deps.Add(v) - - for _, d := range deps { - log.Printf("[TRACE] %s depends on %s", node.Name(), dag.VertexName(d)) - g.Connect(dag.BasicEdge(node, d)) - } - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go deleted file mode 100644 index 0051da03..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go +++ /dev/null @@ -1,742 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/tfdiags" -) - -func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, config *configs.Config) GraphTransformer { - return GraphTransformMulti( - // Add providers from the config - &ProviderConfigTransformer{ - Config: config, - Providers: providers, - Concrete: concrete, - }, - // Add any remaining missing providers - &MissingProviderTransformer{ - Config: config, - Providers: providers, - Concrete: concrete, - }, - // Connect the providers - &ProviderTransformer{ - Config: config, - }, - // Remove unused providers and proxies - &PruneProviderTransformer{}, - // Connect provider to their parent provider nodes - &ParentProviderTransformer{}, - ) -} - -// GraphNodeProvider is an interface that nodes that can be a provider -// must implement. -// -// ProviderAddr returns the address of the provider configuration this -// satisfies, which is relative to the path returned by method Path(). -// -// Name returns the full name of the provider in the config. -type GraphNodeProvider interface { - GraphNodeModulePath - ProviderAddr() addrs.AbsProviderConfig - Name() string -} - -// GraphNodeCloseProvider is an interface that nodes that can be a close -// provider must implement. The CloseProviderName returned is the name of -// the provider they satisfy. -type GraphNodeCloseProvider interface { - GraphNodeModulePath - CloseProviderAddr() addrs.AbsProviderConfig -} - -// GraphNodeProviderConsumer is an interface that nodes that require -// a provider must implement. ProvidedBy must return the address of the provider -// to use, which will be resolved to a configuration either in the same module -// or in an ancestor module, with the resulting absolute address passed to -// SetProvider. -type GraphNodeProviderConsumer interface { - GraphNodeModulePath - // ProvidedBy returns the address of the provider configuration the node - // refers to, if available. 
The following value types may be returned: - // - // * addrs.LocalProviderConfig: the provider was set in the resource config - // * addrs.AbsProviderConfig + exact true: the provider configuration was - // taken from the instance state. - // * addrs.AbsProviderConfig + exact false: no config or state; the returned - // value is a default provider configuration address for the resource's - // Provider - ProvidedBy() (addr addrs.ProviderConfig, exact bool) - - // Provider() returns the Provider FQN for the node. - Provider() (provider addrs.Provider) - - // Set the resolved provider address for this resource. - SetProvider(addrs.AbsProviderConfig) -} - -// ProviderTransformer is a GraphTransformer that maps resources to providers -// within the graph. This will error if there are any resources that don't map -// to proper resources. -type ProviderTransformer struct { - Config *configs.Config -} - -func (t *ProviderTransformer) Transform(g *Graph) error { - // We need to find a provider configuration address for each resource - // either directly represented by a node or referenced by a node in - // the graph, and then create graph edges from provider to provider user - // so that the providers will get initialized first. - - var diags tfdiags.Diagnostics - - // To start, we'll collect the _requested_ provider addresses for each - // node, which we'll then resolve (handling provider inheritence, etc) in - // the next step. - // Our "requested" map is from graph vertices to string representations of - // provider config addresses (for deduping) to requests. - type ProviderRequest struct { - Addr addrs.AbsProviderConfig - Exact bool // If true, inheritence from parent modules is not attempted - } - requested := map[dag.Vertex]map[string]ProviderRequest{} - needConfigured := map[string]addrs.AbsProviderConfig{} - for _, v := range g.Vertices() { - // Does the vertex _directly_ use a provider? - if pv, ok := v.(GraphNodeProviderConsumer); ok { - requested[v] = make(map[string]ProviderRequest) - - providerAddr, exact := pv.ProvidedBy() - var absPc addrs.AbsProviderConfig - - switch p := providerAddr.(type) { - case addrs.AbsProviderConfig: - // ProvidedBy() returns an AbsProviderConfig when the provider - // configuration is set in state, so we do not need to verify - // the FQN matches. - absPc = p - - if exact { - log.Printf("[TRACE] ProviderTransformer: %s is provided by %s exactly", dag.VertexName(v), absPc) - } - - case addrs.LocalProviderConfig: - // ProvidedBy() return a LocalProviderConfig when the resource - // contains a `provider` attribute - absPc.Provider = pv.Provider() - modPath := pv.ModulePath() - if t.Config == nil { - absPc.Module = modPath - absPc.Alias = p.Alias - break - } - - absPc.Module = modPath - absPc.Alias = p.Alias - - default: - // This should never happen; the case statements are meant to be exhaustive - panic(fmt.Sprintf("%s: provider for %s couldn't be determined", dag.VertexName(v), absPc)) - } - - requested[v][absPc.String()] = ProviderRequest{ - Addr: absPc, - Exact: exact, - } - - // Direct references need the provider configured as well as initialized - needConfigured[absPc.String()] = absPc - } - } - - // Now we'll go through all the requested addresses we just collected and - // figure out which _actual_ config address each belongs to, after resolving - // for provider inheritance and passing. 
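Inheritance resolution walks from the requesting module toward the root and takes the first module that actually defines a matching provider configuration. A simplified sketch of that walk, with plain string keys standing in for provider config addresses:

package main

import (
	"fmt"
	"strings"
)

// lookup walks a module path toward the root and returns the first
// provider configuration key that exists, in the spirit of the
// Inherited() loop: ["a","b"] tries a.b.aws, then a.aws, then aws.
func lookup(configured map[string]bool, modulePath []string, provider string) (string, bool) {
	for i := len(modulePath); i >= 0; i-- {
		parts := append(append([]string{}, modulePath[:i]...), provider)
		key := strings.Join(parts, ".")
		if configured[key] {
			return key, true
		}
	}
	return "", false
}

func main() {
	// Provider "aws" is only configured in module.a.
	configured := map[string]bool{"a.aws": true}
	key, ok := lookup(configured, []string{"a", "b"}, "aws")
	fmt.Println(key, ok) // a.aws true
}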
- m := providerVertexMap(g) - for v, reqs := range requested { - for key, req := range reqs { - p := req.Addr - target := m[key] - - _, ok := v.(GraphNodeModulePath) - if !ok && target == nil { - // No target and no path to traverse up from - diags = diags.Append(fmt.Errorf("%s: provider %s couldn't be found", dag.VertexName(v), p)) - continue - } - - if target != nil { - log.Printf("[TRACE] ProviderTransformer: exact match for %s serving %s", p, dag.VertexName(v)) - } - - // if we don't have a provider at this level, walk up the path looking for one, - // unless we were told to be exact. - if target == nil && !req.Exact { - for pp, ok := p.Inherited(); ok; pp, ok = pp.Inherited() { - key := pp.String() - target = m[key] - if target != nil { - log.Printf("[TRACE] ProviderTransformer: %s uses inherited configuration %s", dag.VertexName(v), pp) - break - } - log.Printf("[TRACE] ProviderTransformer: looking for %s to serve %s", pp, dag.VertexName(v)) - } - } - - // If this provider doesn't need to be configured then we can just - // stub it out with an init-only provider node, which will just - // start up the provider and fetch its schema. - if _, exists := needConfigured[key]; target == nil && !exists { - stubAddr := addrs.AbsProviderConfig{ - Module: addrs.RootModule, - Provider: p.Provider, - } - stub := &NodeEvalableProvider{ - &NodeAbstractProvider{ - Addr: stubAddr, - }, - } - m[stubAddr.String()] = stub - log.Printf("[TRACE] ProviderTransformer: creating init-only node for %s", stubAddr) - target = stub - g.Add(target) - } - - if target == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider configuration not present", - fmt.Sprintf( - "To work with %s its original provider configuration at %s is required, but it has been removed. This occurs when a provider configuration is removed while objects created by that provider still exist in the state. Re-add the provider configuration to destroy %s, after which you can remove the provider configuration again.", - dag.VertexName(v), p, dag.VertexName(v), - ), - )) - break - } - - // see if this in an inherited provider - if p, ok := target.(*graphNodeProxyProvider); ok { - g.Remove(p) - target = p.Target() - key = target.(GraphNodeProvider).ProviderAddr().String() - } - - log.Printf("[DEBUG] ProviderTransformer: %q (%T) needs %s", dag.VertexName(v), v, dag.VertexName(target)) - if pv, ok := v.(GraphNodeProviderConsumer); ok { - pv.SetProvider(target.ProviderAddr()) - } - g.Connect(dag.BasicEdge(v, target)) - } - } - - return diags.Err() -} - -// CloseProviderTransformer is a GraphTransformer that adds nodes to the -// graph that will close open provider connections that aren't needed anymore. -// A provider connection is not needed anymore once all depended resources -// in the graph are evaluated. -type CloseProviderTransformer struct{} - -func (t *CloseProviderTransformer) Transform(g *Graph) error { - pm := providerVertexMap(g) - cpm := make(map[string]*graphNodeCloseProvider) - var err error - - for _, v := range pm { - p := v.(GraphNodeProvider) - key := p.ProviderAddr().String() - - // get the close provider of this type if we alread created it - closer := cpm[key] - - if closer == nil { - // create a closer for this provider type - closer = &graphNodeCloseProvider{Addr: p.ProviderAddr()} - g.Add(closer) - cpm[key] = closer - } - - // Close node depends on the provider itself - // this is added unconditionally, so it will connect to all instances - // of the provider. 
Extra edges will be removed by transitive - // reduction. - g.Connect(dag.BasicEdge(closer, p)) - - // connect all the provider's resources to the close node - for _, s := range g.UpEdges(p) { - if _, ok := s.(GraphNodeProviderConsumer); ok { - g.Connect(dag.BasicEdge(closer, s)) - } - } - } - - return err -} - -// MissingProviderTransformer is a GraphTransformer that adds to the graph -// a node for each default provider configuration that is referenced by another -// node but not already present in the graph. -// -// These "default" nodes are always added to the root module, regardless of -// where they are requested. This is important because our inheritance -// resolution behavior in ProviderTransformer will then treat these as a -// last-ditch fallback after walking up the tree, rather than preferring them -// as it would if they were placed in the same module as the requester. -// -// This transformer may create extra nodes that are not needed in practice, -// due to overriding provider configurations in child modules. -// PruneProviderTransformer can then remove these once ProviderTransformer -// has resolved all of the inheritence, etc. -type MissingProviderTransformer struct { - // Providers is the list of providers we support. - Providers []string - - // MissingProviderTransformer needs the config to rule out _implied_ default providers - Config *configs.Config - - // Concrete, if set, overrides how the providers are made. - Concrete ConcreteProviderNodeFunc -} - -func (t *MissingProviderTransformer) Transform(g *Graph) error { - // Initialize factory - if t.Concrete == nil { - t.Concrete = func(a *NodeAbstractProvider) dag.Vertex { - return a - } - } - - var err error - m := providerVertexMap(g) - for _, v := range g.Vertices() { - pv, ok := v.(GraphNodeProviderConsumer) - if !ok { - continue - } - - // For our work here we actually care only about the provider type and - // we plan to place all default providers in the root module. - providerFqn := pv.Provider() - - // We're going to create an implicit _default_ configuration for the - // referenced provider type in the _root_ module, ignoring all other - // aspects of the resource's declared provider address. - defaultAddr := addrs.RootModuleInstance.ProviderConfigDefault(providerFqn) - key := defaultAddr.String() - provider := m[key] - - if provider != nil { - // There's already an explicit default configuration for this - // provider type in the root module, so we have nothing to do. - continue - } - - log.Printf("[DEBUG] adding implicit provider configuration %s, implied first by %s", defaultAddr, dag.VertexName(v)) - - // create the missing top-level provider - provider = t.Concrete(&NodeAbstractProvider{ - Addr: defaultAddr, - }).(GraphNodeProvider) - - g.Add(provider) - m[key] = provider - } - - return err -} - -// ParentProviderTransformer connects provider nodes to their parents. -// -// This works by finding nodes that are both GraphNodeProviders and -// GraphNodeModuleInstance. It then connects the providers to their parent -// path. The parent provider is always at the root level. -type ParentProviderTransformer struct{} - -func (t *ParentProviderTransformer) Transform(g *Graph) error { - pm := providerVertexMap(g) - for _, v := range g.Vertices() { - // Only care about providers - pn, ok := v.(GraphNodeProvider) - if !ok { - continue - } - - // Also require non-empty path, since otherwise we're in the root - // module and so cannot have a parent. 
- if len(pn.ModulePath()) <= 1 { - continue - } - - // this provider may be disabled, but we can only get it's name from - // the ProviderName string - addr := pn.ProviderAddr() - parentAddr, ok := addr.Inherited() - if ok { - parent := pm[parentAddr.String()] - if parent != nil { - g.Connect(dag.BasicEdge(v, parent)) - } - } - } - return nil -} - -// PruneProviderTransformer removes any providers that are not actually used by -// anything, and provider proxies. This avoids the provider being initialized -// and configured. This both saves resources but also avoids errors since -// configuration may imply initialization which may require auth. -type PruneProviderTransformer struct{} - -func (t *PruneProviderTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - // We only care about providers - _, ok := v.(GraphNodeProvider) - if !ok { - continue - } - - // ProxyProviders will have up edges, but we're now done with them in the graph - if _, ok := v.(*graphNodeProxyProvider); ok { - log.Printf("[DEBUG] pruning proxy %s", dag.VertexName(v)) - g.Remove(v) - } - - // Remove providers with no dependencies. - if g.UpEdges(v).Len() == 0 { - log.Printf("[DEBUG] pruning unused %s", dag.VertexName(v)) - g.Remove(v) - } - } - - return nil -} - -func providerVertexMap(g *Graph) map[string]GraphNodeProvider { - m := make(map[string]GraphNodeProvider) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvider); ok { - addr := pv.ProviderAddr() - m[addr.String()] = pv - } - } - - return m -} - -func closeProviderVertexMap(g *Graph) map[string]GraphNodeCloseProvider { - m := make(map[string]GraphNodeCloseProvider) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeCloseProvider); ok { - addr := pv.CloseProviderAddr() - m[addr.String()] = pv - } - } - - return m -} - -type graphNodeCloseProvider struct { - Addr addrs.AbsProviderConfig -} - -var ( - _ GraphNodeCloseProvider = (*graphNodeCloseProvider)(nil) -) - -func (n *graphNodeCloseProvider) Name() string { - return n.Addr.String() + " (close)" -} - -// GraphNodeModulePath -func (n *graphNodeCloseProvider) ModulePath() addrs.Module { - return n.Addr.Module -} - -// GraphNodeEvalable impl. -func (n *graphNodeCloseProvider) EvalTree() EvalNode { - return CloseProviderEvalTree(n.Addr) -} - -// GraphNodeDependable impl. -func (n *graphNodeCloseProvider) DependableName() []string { - return []string{n.Name()} -} - -func (n *graphNodeCloseProvider) CloseProviderAddr() addrs.AbsProviderConfig { - return n.Addr -} - -// GraphNodeDotter impl. -func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - if !opts.Verbose { - return nil - } - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "diamond", - }, - } -} - -// RemovableIfNotTargeted -func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// graphNodeProxyProvider is a GraphNodeProvider implementation that is used to -// store the name and value of a provider node for inheritance between modules. -// These nodes are only used to store the data while loading the provider -// configurations, and are removed after all the resources have been connected -// to their providers. 
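Resolving such a proxy means following the chain until a concrete provider node is reached, since a proxy may point at another proxy. A minimal sketch of that recursion with toy types:

package main

import "fmt"

// provider is a toy stand-in for GraphNodeProvider.
type provider interface{ name() string }

type concreteProvider struct{ addr string }

func (p *concreteProvider) name() string { return p.addr }

// proxyProvider stands in for another provider, possibly another proxy.
type proxyProvider struct{ next provider }

func (p *proxyProvider) name() string { return p.target().name() + " (proxy)" }

// target follows the proxy chain until it reaches a concrete node,
// the same recursion as graphNodeProxyProvider.Target below.
func (p *proxyProvider) target() provider {
	if next, ok := p.next.(*proxyProvider); ok {
		return next.target()
	}
	return p.next
}

func main() {
	root := &concreteProvider{addr: "provider.aws"}
	inner := &proxyProvider{next: &proxyProvider{next: root}}
	fmt.Println(inner.target().name()) // provider.aws
}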
-type graphNodeProxyProvider struct { - addr addrs.AbsProviderConfig - target GraphNodeProvider -} - -var ( - _ GraphNodeModulePath = (*graphNodeProxyProvider)(nil) - _ GraphNodeProvider = (*graphNodeProxyProvider)(nil) -) - -func (n *graphNodeProxyProvider) ProviderAddr() addrs.AbsProviderConfig { - return n.addr -} - -func (n *graphNodeProxyProvider) ModulePath() addrs.Module { - return n.addr.Module -} - -func (n *graphNodeProxyProvider) Name() string { - return n.addr.String() + " (proxy)" -} - -// find the concrete provider instance -func (n *graphNodeProxyProvider) Target() GraphNodeProvider { - switch t := n.target.(type) { - case *graphNodeProxyProvider: - return t.Target() - default: - return n.target - } -} - -// ProviderConfigTransformer adds all provider nodes from the configuration and -// attaches the configs. -type ProviderConfigTransformer struct { - Providers []string - Concrete ConcreteProviderNodeFunc - - // each provider node is stored here so that the proxy nodes can look up - // their targets by name. - providers map[string]GraphNodeProvider - // record providers that can be overriden with a proxy - proxiable map[string]bool - - // Config is the root node of the configuration tree to add providers from. - Config *configs.Config -} - -func (t *ProviderConfigTransformer) Transform(g *Graph) error { - // If no configuration is given, we don't do anything - if t.Config == nil { - return nil - } - - t.providers = make(map[string]GraphNodeProvider) - t.proxiable = make(map[string]bool) - - // Start the transformation process - if err := t.transform(g, t.Config); err != nil { - return err - } - - // finally attach the configs to the new nodes - return t.attachProviderConfigs(g) -} - -func (t *ProviderConfigTransformer) transform(g *Graph, c *configs.Config) error { - // If no config, do nothing - if c == nil { - return nil - } - - // Add our resources - if err := t.transformSingle(g, c); err != nil { - return err - } - - // Transform all the children. - for _, cc := range c.Children { - if err := t.transform(g, cc); err != nil { - return err - } - } - return nil -} - -func (t *ProviderConfigTransformer) transformSingle(g *Graph, c *configs.Config) error { - // Get the module associated with this configuration tree node - mod := c.Module - path := c.Path - - // add all providers from the configuration - for _, p := range mod.ProviderConfigs { - fqn := mod.ProviderForLocalConfig(p.Addr()) - addr := addrs.AbsProviderConfig{ - Provider: fqn, - Alias: p.Alias, - Module: path, - } - - abstract := &NodeAbstractProvider{ - Addr: addr, - } - var v dag.Vertex - if t.Concrete != nil { - v = t.Concrete(abstract) - } else { - v = abstract - } - - // Add it to the graph - g.Add(v) - key := addr.String() - t.providers[key] = v.(GraphNodeProvider) - - // A provider configuration is "proxyable" if its configuration is - // entirely empty. This means it's standing in for a provider - // configuration that must be passed in from the parent module. - // We decide this by evaluating the config with an empty schema; - // if this succeeds, then we know there's nothing in the body. - _, diags := p.Config.Content(&hcl.BodySchema{}) - t.proxiable[key] = !diags.HasErrors() - } - - // Now replace the provider nodes with proxy nodes if a provider was being - // passed in, and create implicit proxies if there was no config. Any extra - // proxies will be removed in the prune step. 
- return t.addProxyProviders(g, c) -} - -func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, c *configs.Config) error { - path := c.Path - - // can't add proxies at the root - if len(path) == 0 { - return nil - } - - parentPath, callAddr := path.Call() - parent := c.Parent - if parent == nil { - return nil - } - - callName := callAddr.Name - var parentCfg *configs.ModuleCall - for name, mod := range parent.Module.ModuleCalls { - if name == callName { - parentCfg = mod - break - } - } - - if parentCfg == nil { - // this can't really happen during normal execution. - return fmt.Errorf("parent module config not found for %s", c.Path.String()) - } - - // Go through all the providers the parent is passing in, and add proxies to - // the parent provider nodes. - for _, pair := range parentCfg.Providers { - fqn := c.Module.ProviderForLocalConfig(pair.InChild.Addr()) - fullAddr := addrs.AbsProviderConfig{ - Provider: fqn, - Module: path, - Alias: pair.InChild.Addr().Alias, - } - - fullParentAddr := addrs.AbsProviderConfig{ - Provider: fqn, - Module: parentPath, - Alias: pair.InParent.Addr().Alias, - } - - fullName := fullAddr.String() - fullParentName := fullParentAddr.String() - - parentProvider := t.providers[fullParentName] - - if parentProvider == nil { - return fmt.Errorf("missing provider %s", fullParentName) - } - - proxy := &graphNodeProxyProvider{ - addr: fullAddr, - target: parentProvider, - } - - concreteProvider := t.providers[fullName] - - // replace the concrete node with the provider passed in - if concreteProvider != nil && t.proxiable[fullName] { - g.Replace(concreteProvider, proxy) - t.providers[fullName] = proxy - continue - } - - // aliased configurations can't be implicitly passed in - if fullAddr.Alias != "" { - continue - } - - // There was no concrete provider, so add this as an implicit provider. - // The extra proxy will be pruned later if it's unused. - g.Add(proxy) - t.providers[fullName] = proxy - } - return nil -} - -func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { - for _, v := range g.Vertices() { - // Only care about GraphNodeAttachProvider implementations - apn, ok := v.(GraphNodeAttachProvider) - if !ok { - continue - } - - // Determine what we're looking for - addr := apn.ProviderAddr() - - // Get the configuration. - mc := t.Config.Descendent(addr.Module) - if mc == nil { - log.Printf("[TRACE] ProviderConfigTransformer: no configuration available for %s", addr.String()) - continue - } - - // Go through the provider configs to find the matching config - for _, p := range mc.Module.ProviderConfigs { - if p.Name == addr.Provider.Type && p.Alias == addr.Alias { - log.Printf("[TRACE] ProviderConfigTransformer: attaching to %q provider configuration from %s", dag.VertexName(v), p.DeclRange) - apn.AttachProvider(p) - break - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go deleted file mode 100644 index b3102665..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go +++ /dev/null @@ -1,179 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeProvisioner is an interface that nodes that can be a provisioner -// must implement. The ProvisionerName returned is the name of the provisioner -// they satisfy. 
-type GraphNodeProvisioner interface { - ProvisionerName() string -} - -// GraphNodeCloseProvisioner is an interface that nodes that can be a close -// provisioner must implement. The CloseProvisionerName returned is the name -// of the provisioner they satisfy. -type GraphNodeCloseProvisioner interface { - CloseProvisionerName() string -} - -// GraphNodeProvisionerConsumer is an interface that nodes that require -// a provisioner must implement. ProvisionedBy must return the names of the -// provisioners to use. -type GraphNodeProvisionerConsumer interface { - ProvisionedBy() []string -} - -// ProvisionerTransformer is a GraphTransformer that maps resources to -// provisioners within the graph. This will error if there are any resources -// that don't map to proper resources. -type ProvisionerTransformer struct{} - -func (t *ProvisionerTransformer) Transform(g *Graph) error { - // Go through the other nodes and match them to provisioners they need - var err error - m := provisionerVertexMap(g) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisionerConsumer); ok { - for _, p := range pv.ProvisionedBy() { - if m[p] == nil { - err = multierror.Append(err, fmt.Errorf( - "%s: provisioner %s couldn't be found", - dag.VertexName(v), p)) - continue - } - - log.Printf("[TRACE] ProvisionerTransformer: %s is provisioned by %s (%q)", dag.VertexName(v), p, dag.VertexName(m[p])) - g.Connect(dag.BasicEdge(v, m[p])) - } - } - } - - return err -} - -// MissingProvisionerTransformer is a GraphTransformer that adds nodes -// for missing provisioners into the graph. -type MissingProvisionerTransformer struct { - // Provisioners is the list of provisioners we support. - Provisioners []string -} - -func (t *MissingProvisionerTransformer) Transform(g *Graph) error { - // Create a set of our supported provisioners - supported := make(map[string]struct{}, len(t.Provisioners)) - for _, v := range t.Provisioners { - supported[v] = struct{}{} - } - - // Get the map of provisioners we already have in our graph - m := provisionerVertexMap(g) - - // Go through all the provisioner consumers and make sure we add - // that provisioner if it is missing. - for _, v := range g.Vertices() { - pv, ok := v.(GraphNodeProvisionerConsumer) - if !ok { - continue - } - - for _, p := range pv.ProvisionedBy() { - if _, ok := m[p]; ok { - // This provisioner already exists as a configure node - continue - } - - if _, ok := supported[p]; !ok { - // If we don't support the provisioner type, we skip it. - // Validation later will catch this as an error. - continue - } - - // Build the vertex - var newV dag.Vertex = &NodeProvisioner{ - NameValue: p, - } - - // Add the missing provisioner node to the graph - m[p] = g.Add(newV) - log.Printf("[TRACE] MissingProviderTransformer: added implicit provisioner %s, first implied by %s", p, dag.VertexName(v)) - } - } - - return nil -} - -// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the -// graph that will close open provisioner connections that aren't needed -// anymore. A provisioner connection is not needed anymore once all depended -// resources in the graph are evaluated. 
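The pattern is one close node per provisioner name, with an edge from the closer to every consumer so the closer is evaluated last. A small standalone sketch (the resource and provisioner names are invented):

package main

import "fmt"

type edge struct{ from, to string }

func main() {
	// Which provisioners each resource uses.
	consumers := map[string][]string{
		"aws_instance.a": {"local-exec"},
		"aws_instance.b": {"local-exec", "file"},
	}

	closers := map[string]bool{}
	var edges []edge
	for resource, provisioners := range consumers {
		for _, p := range provisioners {
			closer := "provisioner." + p + " (close)"
			// Create each close node only once, no matter how many
			// consumers share the provisioner.
			closers[closer] = true
			// The closer depends on every consumer, so it runs
			// only after all of them are done.
			edges = append(edges, edge{from: closer, to: resource})
		}
	}
	for _, e := range edges {
		fmt.Printf("%s -> %s\n", e.from, e.to)
	}
}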
-type CloseProvisionerTransformer struct{} - -func (t *CloseProvisionerTransformer) Transform(g *Graph) error { - m := closeProvisionerVertexMap(g) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisionerConsumer); ok { - for _, p := range pv.ProvisionedBy() { - source := m[p] - - if source == nil { - // Create a new graphNodeCloseProvisioner and add it to the graph - source = &graphNodeCloseProvisioner{ProvisionerNameValue: p} - g.Add(source) - - // Make sure we also add the new graphNodeCloseProvisioner to the map - // so we don't create and add any duplicate graphNodeCloseProvisioners. - m[p] = source - } - - g.Connect(dag.BasicEdge(source, v)) - } - } - } - - return nil -} - -func provisionerVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisioner); ok { - m[pv.ProvisionerName()] = v - } - } - - return m -} - -func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeCloseProvisioner); ok { - m[pv.CloseProvisionerName()] = v - } - } - - return m -} - -type graphNodeCloseProvisioner struct { - ProvisionerNameValue string -} - -func (n *graphNodeCloseProvisioner) Name() string { - return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue) -} - -// GraphNodeEvalable impl. -func (n *graphNodeCloseProvisioner) EvalTree() EvalNode { - return &EvalCloseProvisioner{Name: n.ProvisionerNameValue} -} - -func (n *graphNodeCloseProvisioner) CloseProvisionerName() string { - return n.ProvisionerNameValue -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go deleted file mode 100644 index 5ea5bd8e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go +++ /dev/null @@ -1,475 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/lang" -) - -// GraphNodeReferenceable must be implemented by any node that represents -// a Terraform thing that can be referenced (resource, module, etc.). -// -// Even if the thing has no name, this should return an empty list. By -// implementing this and returning a non-nil result, you say that this CAN -// be referenced and other methods of referencing may still be possible (such -// as by path!) -type GraphNodeReferenceable interface { - GraphNodeModulePath - - // ReferenceableAddrs returns a list of addresses through which this can be - // referenced. - ReferenceableAddrs() []addrs.Referenceable -} - -// GraphNodeReferencer must be implemented by nodes that reference other -// Terraform items and therefore depend on them. -type GraphNodeReferencer interface { - GraphNodeModulePath - - // References returns a list of references made by this node, which - // include both a referenced address and source location information for - // the reference. - References() []*addrs.Reference -} - -type GraphNodeAttachDependencies interface { - GraphNodeConfigResource - AttachDependencies([]addrs.ConfigResource) -} - -// graphNodeAttachResourceDependencies records all resources that are transitively -// referenced through depends_on in the configuration. 
This is used by data -// resources to determine if they can be read during the plan, or if they need -// to be further delayed until apply. -// We can only use an addrs.ConfigResource address here, because modules are -// not yet expended in the graph. While this will cause some extra data -// resources to show in the plan when their depends_on references may be in -// unrelated module instances, the fact that it only happens when there are any -// resource updates pending means we can still avoid the problem of the -// "perpetual diff" -type graphNodeAttachResourceDependencies interface { - GraphNodeConfigResource - AttachResourceDependencies([]addrs.ConfigResource) - DependsOn() []*addrs.Reference -} - -// GraphNodeReferenceOutside is an interface that can optionally be implemented. -// A node that implements it can specify that its own referenceable addresses -// and/or the addresses it references are in a different module than the -// node itself. -// -// Any referenceable addresses returned by ReferenceableAddrs are interpreted -// relative to the returned selfPath. -// -// Any references returned by References are interpreted relative to the -// returned referencePath. -// -// It is valid but not required for either of these paths to match what is -// returned by method Path, though if both match the main Path then there -// is no reason to implement this method. -// -// The primary use-case for this is the nodes representing module input -// variables, since their expressions are resolved in terms of their calling -// module, but they are still referenced from their own module. -type GraphNodeReferenceOutside interface { - // ReferenceOutside returns a path in which any references from this node - // are resolved. - ReferenceOutside() (selfPath, referencePath addrs.Module) -} - -// ReferenceTransformer is a GraphTransformer that connects all the -// nodes that reference each other in order to form the proper ordering. -type ReferenceTransformer struct{} - -func (t *ReferenceTransformer) Transform(g *Graph) error { - // Build a reference map so we can efficiently look up the references - vs := g.Vertices() - m := NewReferenceMap(vs) - - // Find the things that reference things and connect them - for _, v := range vs { - if _, ok := v.(GraphNodeDestroyer); ok { - // destroy nodes references are not connected, since they can only - // use their own state. - continue - } - parents := m.References(v) - parentsDbg := make([]string, len(parents)) - for i, v := range parents { - parentsDbg[i] = dag.VertexName(v) - } - log.Printf( - "[DEBUG] ReferenceTransformer: %q references: %v", - dag.VertexName(v), parentsDbg) - - for _, parent := range parents { - g.Connect(dag.BasicEdge(v, parent)) - } - - if len(parents) > 0 { - continue - } - } - - return nil -} - -type depMap map[string]addrs.ConfigResource - -// addDep adds the vertex if it represents a resource in the -// graph. -func (m depMap) add(v dag.Vertex) { - // we're only concerned with resources which may have changes that - // need to be applied. - switch v := v.(type) { - case GraphNodeResourceInstance: - instAddr := v.ResourceInstanceAddr() - addr := instAddr.ContainingResource().Config() - m[addr.String()] = addr - case GraphNodeConfigResource: - addr := v.ResourceAddr() - m[addr.String()] = addr - } -} - -// attachDataResourceDependenciesTransformer records all resources transitively referenced -// through a configuration depends_on. 
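Recording those dependencies amounts to a transitive closure: start from each depends_on target, walk its dependency edges, and deduplicate by address. A compact sketch over an adjacency-map graph with invented resource names:

package main

import "fmt"

// ancestors walks dependency edges outward from start, deduplicating
// into a map keyed by address, the same shape as the depMap below.
func ancestors(g map[string][]string, start string, into map[string]bool) {
	for _, dep := range g[start] {
		if !into[dep] {
			into[dep] = true
			ancestors(g, dep, into)
		}
	}
}

func main() {
	g := map[string][]string{
		"aws_instance.web": {"aws_subnet.main"},
		"aws_subnet.main":  {"aws_vpc.main"},
	}
	dependsOn := []string{"aws_instance.web"}

	deps := map[string]bool{}
	for _, d := range dependsOn {
		deps[d] = true
		ancestors(g, d, deps)
	}
	fmt.Println(len(deps), "transitive dependencies recorded") // 3
}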
-type attachDataResourceDependenciesTransformer struct { -} - -func (t attachDataResourceDependenciesTransformer) Transform(g *Graph) error { - // First we need to make a map of referenceable addresses to their vertices. - // This is very similar to what's done in ReferenceTransformer, but we keep - // implementation separate as they may need to change independently. - vertices := g.Vertices() - refMap := NewReferenceMap(vertices) - - for _, v := range vertices { - depender, ok := v.(graphNodeAttachResourceDependencies) - if !ok { - continue - } - selfAddr := depender.ResourceAddr() - - // Only data need to attach depends_on, so they can determine if they - // are eligible to be read during plan. - if selfAddr.Resource.Mode != addrs.DataResourceMode { - continue - } - - // depMap will only add resource references and dedupe - m := make(depMap) - - for _, dep := range refMap.DependsOn(v) { - // any the dependency - m.add(dep) - // and check any ancestors - ans, _ := g.Ancestors(dep) - for _, v := range ans { - m.add(v) - } - } - - deps := make([]addrs.ConfigResource, 0, len(m)) - for _, d := range m { - deps = append(deps, d) - } - - log.Printf("[TRACE] AttachDependsOnTransformer: %s depends on %s", depender.ResourceAddr(), deps) - depender.AttachResourceDependencies(deps) - } - - return nil -} - -// AttachDependenciesTransformer records all resource dependencies for each -// instance, and attaches the addresses to the node itself. Managed resource -// will record these in the state for proper ordering of destroy operations. -type AttachDependenciesTransformer struct { -} - -func (t AttachDependenciesTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - attacher, ok := v.(GraphNodeAttachDependencies) - if !ok { - continue - } - selfAddr := attacher.ResourceAddr() - - // Data sources don't need to track destroy dependencies - if selfAddr.Resource.Mode == addrs.DataResourceMode { - continue - } - - ans, err := g.Ancestors(v) - if err != nil { - return err - } - - // dedupe addrs when there's multiple instances involved, or - // multiple paths in the un-reduced graph - depMap := map[string]addrs.ConfigResource{} - for _, d := range ans { - var addr addrs.ConfigResource - - switch d := d.(type) { - case GraphNodeResourceInstance: - instAddr := d.ResourceInstanceAddr() - addr = instAddr.ContainingResource().Config() - case GraphNodeConfigResource: - addr = d.ResourceAddr() - default: - continue - } - - // Data sources don't need to track destroy dependencies - if addr.Resource.Mode == addrs.DataResourceMode { - continue - } - - if addr.Equal(selfAddr) { - continue - } - depMap[addr.String()] = addr - } - - deps := make([]addrs.ConfigResource, 0, len(depMap)) - for _, d := range depMap { - deps = append(deps, d) - } - sort.Slice(deps, func(i, j int) bool { - return deps[i].String() < deps[j].String() - }) - - log.Printf("[TRACE] AttachDependenciesTransformer: %s depends on %s", attacher.ResourceAddr(), deps) - attacher.AttachDependencies(deps) - } - - return nil -} - -// ReferenceMap is a structure that can be used to efficiently check -// for references on a graph, mapping internal reference keys (as produced by -// the mapKey method) to one or more vertices that are identified by each key. -type ReferenceMap map[string][]dag.Vertex - -// References returns the set of vertices that the given vertex refers to, -// and any referenced addresses that do not have corresponding vertices. 
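Lookups in this map are scoped by module, which is why the keys combine the module path with the referenceable address. A tiny sketch of that composite-key scheme:

package main

import "fmt"

// key mirrors the module-scoped composite key: the same address in
// two different modules indexes two different vertex lists.
func key(modulePath, addr string) string {
	return fmt.Sprintf("%s|%s", modulePath, addr)
}

func main() {
	refMap := map[string][]string{}

	// Index a referenceable vertex under its module-scoped key.
	k := key("module.a", "aws_instance.web")
	refMap[k] = append(refMap[k], "node:aws_instance.web")

	// A referrer in the same module resolves through the same key;
	// a referrer in module.b would miss, and fall back as below.
	if vs, ok := refMap[key("module.a", "aws_instance.web")]; ok {
		fmt.Println(vs)
	}
}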
-// References returns the set of vertices that the given vertex refers to.
-// Referenced addresses that have no corresponding vertex are skipped, with
-// a warning logged for any that cannot be resolved at all.
-func (m ReferenceMap) References(v dag.Vertex) []dag.Vertex {
-	rn, ok := v.(GraphNodeReferencer)
-	if !ok {
-		return nil
-	}
-
-	var matches []dag.Vertex
-
-	for _, ref := range rn.References() {
-		subject := ref.Subject
-
-		key := m.referenceMapKey(v, subject)
-		if _, exists := m[key]; !exists {
-			// If what we were looking for was a ResourceInstance then we
-			// might be in a resource-oriented graph rather than an
-			// instance-oriented graph, and so we'll see if we have the
-			// resource itself instead.
-			switch ri := subject.(type) {
-			case addrs.ResourceInstance:
-				subject = ri.ContainingResource()
-			case addrs.ResourceInstancePhase:
-				subject = ri.ContainingResource()
-			case addrs.AbsModuleCallOutput:
-				subject = ri.ModuleCallOutput()
-			default:
-				log.Printf("[WARN] ReferenceTransformer: reference not found: %q", subject)
-				continue
-			}
-			key = m.referenceMapKey(v, subject)
-		}
-		vertices := m[key]
-		for _, rv := range vertices {
-			// don't include self-references
-			if rv == v {
-				continue
-			}
-			matches = append(matches, rv)
-		}
-	}
-
-	return matches
-}
-
-// DependsOn returns the set of vertices that the given vertex refers to from
-// the configured depends_on.
-func (m ReferenceMap) DependsOn(v dag.Vertex) []dag.Vertex {
-	depender, ok := v.(graphNodeAttachResourceDependencies)
-	if !ok {
-		return nil
-	}
-
-	var matches []dag.Vertex
-
-	for _, ref := range depender.DependsOn() {
-		subject := ref.Subject
-
-		key := m.referenceMapKey(v, subject)
-		vertices, ok := m[key]
-		if !ok {
-			log.Printf("[WARN] DependsOn: reference not found: %q", subject)
-			continue
-		}
-		for _, rv := range vertices {
-			// don't include self-references
-			if rv == v {
-				continue
-			}
-			matches = append(matches, rv)
-		}
-	}
-
-	return matches
-}
-
-func (m *ReferenceMap) mapKey(path addrs.Module, addr addrs.Referenceable) string {
-	return fmt.Sprintf("%s|%s", path.String(), addr.String())
-}
-
-// vertexReferenceablePath returns the path in which the given vertex can be
-// referenced. This is the path that the results of its ReferenceableAddrs
-// method are considered to be relative to.
-//
-// Only GraphNodeModulePath implementations can be referenced, so this method will
-// panic if the given vertex does not implement that interface.
-func vertexReferenceablePath(v dag.Vertex) addrs.Module {
-	sp, ok := v.(GraphNodeModulePath)
-	if !ok {
-		// Only nodes with paths can participate in a reference map.
-		panic(fmt.Errorf("vertexReferenceablePath on vertex type %T which doesn't implement GraphNodeModulePath", v))
-	}
-
-	if outside, ok := v.(GraphNodeReferenceOutside); ok {
-		// Vertex is referenced from a different module than where it was
-		// declared.
-		path, _ := outside.ReferenceOutside()
-		return path
-	}
-
-	// Vertex is referenced from the same module as where it was declared.
-	return sp.ModulePath()
-}
-
-// vertexReferencePath returns the path in which references _from_ the given
-// vertex must be interpreted.
-//
-// Only GraphNodeModulePath implementations can have references, so this method
-// will panic if the given vertex does not implement that interface.
-func vertexReferencePath(v dag.Vertex) addrs.Module {
-	sp, ok := v.(GraphNodeModulePath)
-	if !ok {
-		// Only nodes with paths can participate in a reference map.
-		panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeModulePath", v))
-	}
-
-	if outside, ok := v.(GraphNodeReferenceOutside); ok {
-		// Vertex makes references to objects in a different module than where
-		// it was declared.
- _, path := outside.ReferenceOutside() - return path - } - - // Vertex makes references to objects in the same module as where it - // was declared. - return sp.ModulePath() -} - -// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex -// that the reference is from, and "addr" is the address of the object being -// referenced. -// -// The result is an opaque string that includes both the address of the given -// object and the address of the module instance that object belongs to. -// -// Only GraphNodeModulePath implementations can be referrers, so this method will -// panic if the given vertex does not implement that interface. -func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string { - path := vertexReferencePath(referrer) - return m.mapKey(path, addr) -} - -// NewReferenceMap is used to create a new reference map for the -// given set of vertices. -func NewReferenceMap(vs []dag.Vertex) ReferenceMap { - // Build the lookup table - m := make(ReferenceMap) - for _, v := range vs { - // We're only looking for referenceable nodes - rn, ok := v.(GraphNodeReferenceable) - if !ok { - continue - } - - path := vertexReferenceablePath(v) - - // Go through and cache them - for _, addr := range rn.ReferenceableAddrs() { - key := m.mapKey(path, addr) - m[key] = append(m[key], v) - } - } - - return m -} - -// ReferencesFromConfig returns the references that a configuration has -// based on the interpolated variables in a configuration. -func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference { - if body == nil { - return nil - } - refs, _ := lang.ReferencesInBlock(body, schema) - return refs -} - -// appendResourceDestroyReferences identifies resource and resource instance -// references in the given slice and appends to it the "destroy-phase" -// equivalents of those references, returning the result. -// -// This can be used in the References implementation for a node which must also -// depend on the destruction of anything it references. 
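NewReferenceMap above keys its lookup table on a module path plus a referenceable address. The toy below shows why the composite key matters: identical local addresses in different modules must not collide. The mapKey helper and addresses here are illustrative stand-ins, not the vendored API:

package main

import "fmt"

// mapKey mirrors the "module path | address" scheme used by the
// reference map: the same local address in two different modules
// must land in two different buckets.
func mapKey(path, addr string) string {
	return fmt.Sprintf("%s|%s", path, addr)
}

func main() {
	m := map[string][]string{}
	m[mapKey("module.a", "var.name")] = append(m[mapKey("module.a", "var.name")], "vertex-1")
	m[mapKey("module.b", "var.name")] = append(m[mapKey("module.b", "var.name")], "vertex-2")

	fmt.Println(m[mapKey("module.a", "var.name")]) // [vertex-1]
	fmt.Println(m[mapKey("module.b", "var.name")]) // [vertex-2]
}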
-func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference { - given := refs - for _, ref := range given { - switch tr := ref.Subject.(type) { - case addrs.Resource: - newRef := *ref // shallow copy - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - refs = append(refs, &newRef) - case addrs.ResourceInstance: - newRef := *ref // shallow copy - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - refs = append(refs, &newRef) - } - // FIXME: Using this method in module expansion references, - // May want to refactor this method beyond resources - } - return refs -} - -func modulePrefixStr(p addrs.ModuleInstance) string { - return p.String() -} - -func modulePrefixList(result []string, prefix string) []string { - if prefix != "" { - for i, v := range result { - result[i] = fmt.Sprintf("%s.%s", prefix, v) - } - } - - return result -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go deleted file mode 100644 index 7c354cdf..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/states" -) - -// RemovedModuleTransformer implements GraphTransformer to add nodes indicating -// when a module was removed from the configuration. -type RemovedModuleTransformer struct { - Config *configs.Config // root node in the config tree - State *states.State -} - -func (t *RemovedModuleTransformer) Transform(g *Graph) error { - // nothing to remove if there's no state! - if t.State == nil { - return nil - } - - removed := map[string]addrs.Module{} - - for _, m := range t.State.Modules { - cc := t.Config.DescendentForInstance(m.Addr) - if cc != nil { - continue - } - removed[m.Addr.Module().String()] = m.Addr.Module() - log.Printf("[DEBUG] %s is no longer in configuration\n", m.Addr) - } - - // add closers to collect any module instances we're removing - for _, modAddr := range removed { - closer := &nodeCloseModule{ - Addr: modAddr, - } - g.Add(closer) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go deleted file mode 100644 index 70a843ab..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go +++ /dev/null @@ -1,36 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/dag" -) - -// ResourceCountTransformer is a GraphTransformer that expands the count -// out for a specific resource. -// -// This assumes that the count is already interpolated. 
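Looking back at appendResourceDestroyReferences above: it iterates a snapshot of the slice while appending derived entries to it. A minimal sketch of that pattern follows, with invented addresses; note that in Go a range expression is evaluated once, so the snapshot mainly makes the intent explicit:

package main

import "fmt"

func main() {
	refs := []string{"aws_instance.a", "aws_instance.b"}

	// Range over a snapshot while appending to the same slice: the loop
	// sees only the original elements, so the appended "destroy phase"
	// entries are not themselves re-processed.
	given := refs
	for _, r := range given {
		refs = append(refs, r+" (destroy)")
	}
	fmt.Println(refs)
}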
-type ResourceCountTransformer struct {
-	Concrete ConcreteResourceInstanceNodeFunc
-	Schema   *configschema.Block
-
-	Addr          addrs.ConfigResource
-	InstanceAddrs []addrs.AbsResourceInstance
-}
-
-func (t *ResourceCountTransformer) Transform(g *Graph) error {
-	for _, addr := range t.InstanceAddrs {
-		abstract := NewNodeAbstractResourceInstance(addr)
-		abstract.Schema = t.Schema
-		var node dag.Vertex = abstract
-		if f := t.Concrete; f != nil {
-			node = f(abstract)
-		}
-
-		log.Printf("[TRACE] ResourceCountTransformer: adding %s as %T", addr, node)
-		g.Add(node)
-	}
-	return nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
deleted file mode 100644
index 0c44084b..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package terraform
-
-import (
-	"github.com/hashicorp/terraform/dag"
-)
-
-const rootNodeName = "root"
-
-// RootTransformer is a GraphTransformer that adds a root to the graph.
-type RootTransformer struct{}
-
-func (t *RootTransformer) Transform(g *Graph) error {
-	// If we already have a good root, we're done
-	if _, err := g.Root(); err == nil {
-		return nil
-	}
-
-	// Add a root
-	var root graphNodeRoot
-	g.Add(root)
-
-	// Connect the root to all the vertices that need it
-	for _, v := range g.Vertices() {
-		if v == root {
-			continue
-		}
-
-		if g.UpEdges(v).Len() == 0 {
-			g.Connect(dag.BasicEdge(root, v))
-		}
-	}
-
-	return nil
-}
-
-type graphNodeRoot struct{}
-
-func (n graphNodeRoot) Name() string {
-	return rootNodeName
-}
-
-// CloseRootModuleTransformer is a GraphTransformer that adds a node to close
-// the root module, depending on everything else in the graph so that it runs
-// last.
-type CloseRootModuleTransformer struct{}
-
-func (t *CloseRootModuleTransformer) Transform(g *Graph) error {
-	// close the root module
-	closeRoot := &nodeCloseModule{}
-	g.Add(closeRoot)
-
-	// since this is closing the root module, make it depend on everything in
-	// the root module.
-	for _, v := range g.Vertices() {
-		if v == closeRoot {
-			continue
-		}
-
-		// since this is closing the root module, and must be last, we can
-		// connect to anything that doesn't have any up edges.
-		if g.UpEdges(v).Len() == 0 {
-			g.Connect(dag.BasicEdge(closeRoot, v))
-		}
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
deleted file mode 100644
index a82f1859..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package terraform
-
-import (
-	"log"
-
-	"github.com/hashicorp/terraform/states"
-)
-
-// StateTransformer is a GraphTransformer that adds the elements of
-// the state to the graph.
-//
-// This transform is used for example by the DestroyPlanGraphBuilder to ensure
-// that only resources that are in the state are represented in the graph.
-type StateTransformer struct {
-	// ConcreteCurrent and ConcreteDeposed are used to specialize the abstract
-	// resource instance nodes that this transformer will create.
-	//
-	// If either of these is nil, the objects of that type will be skipped and
-	// not added to the graph at all. It doesn't make sense to use this
-	// transformer without setting at least one of these, since that would
-	// skip everything and thus be a no-op.
-	ConcreteCurrent ConcreteResourceInstanceNodeFunc
-	ConcreteDeposed ConcreteResourceInstanceDeposedNodeFunc
-
-	State *states.State
-}
-
-func (t *StateTransformer) Transform(g *Graph) error {
-	if !t.State.HasResources() {
-		log.Printf("[TRACE] StateTransformer: state is empty, so nothing to do")
-		return nil
-	}
-
-	switch {
-	case t.ConcreteCurrent != nil && t.ConcreteDeposed != nil:
-		log.Printf("[TRACE] StateTransformer: creating nodes for both current and deposed instance objects")
-	case t.ConcreteCurrent != nil:
-		log.Printf("[TRACE] StateTransformer: creating nodes for current instance objects only")
-	case t.ConcreteDeposed != nil:
-		log.Printf("[TRACE] StateTransformer: creating nodes for deposed instance objects only")
-	default:
-		log.Printf("[TRACE] StateTransformer: pointless no-op call, creating no nodes at all")
-	}
-
-	for _, ms := range t.State.Modules {
-		for _, rs := range ms.Resources {
-			resourceAddr := rs.Addr
-
-			for key, is := range rs.Instances {
-				addr := resourceAddr.Instance(key)
-
-				if obj := is.Current; obj != nil && t.ConcreteCurrent != nil {
-					abstract := NewNodeAbstractResourceInstance(addr)
-					node := t.ConcreteCurrent(abstract)
-					g.Add(node)
-					log.Printf("[TRACE] StateTransformer: added %T for %s current object", node, addr)
-				}
-
-				if t.ConcreteDeposed != nil {
-					for dk := range is.Deposed {
-						abstract := NewNodeAbstractResourceInstance(addr)
-						node := t.ConcreteDeposed(abstract, dk)
-						g.Add(node)
-						log.Printf("[TRACE] StateTransformer: added %T for %s deposed object %s", node, addr, dk)
-					}
-				}
-			}
-		}
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
deleted file mode 100644
index a5d4ba93..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
+++ /dev/null
@@ -1,263 +0,0 @@
-package terraform
-
-import (
-	"log"
-
-	"github.com/hashicorp/terraform/addrs"
-	"github.com/hashicorp/terraform/dag"
-)
-
-// GraphNodeTargetable is an interface for graph nodes to implement when they
-// need to be told about incoming targets. This is useful for nodes that need
-// to respect targets as they dynamically expand. Note that the list of targets
-// provided will contain every target provided, and each implementing graph
-// node must filter this list to targets considered relevant.
-type GraphNodeTargetable interface {
-	SetTargets([]addrs.Targetable)
-}
-
-// GraphNodeTargetDownstream is an interface for graph nodes that need to
-// remain present under targeting if any of their dependencies are targeted.
-// TargetDownstream is called with the set of vertices that are direct
-// dependencies for the node, and it should return true if the node must remain
-// in the graph in support of those dependencies.
-//
-// This is used in situations where the dependency edges represent an
-// ordering relationship but the dependency must still be visited if its
-// dependencies are visited. This is true for outputs, for example, since
-// they must get updated if any of their dependent resources get updated,
-// which would not normally be true if one of their dependencies were targeted.
-type GraphNodeTargetDownstream interface {
-	TargetDownstream(targeted, untargeted dag.Set) bool
-}
-
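Before the TargetsTransformer implementation below, a toy version of the selection rule it applies (a target keeps itself plus everything it depends on) may be useful. The deps map and addresses are invented, and the real code walks a DAG via g.Ancestors rather than a recursive map lookup:

package main

import "fmt"

// deps maps each node to the nodes it depends on, a stand-in for the
// ancestor walk performed against the real graph.
var deps = map[string][]string{
	"output.ip":      {"aws_instance.a"},
	"aws_instance.a": {"aws_vpc.main"},
	"aws_vpc.main":   {},
}

func keep(target string, kept map[string]bool) {
	if kept[target] {
		return
	}
	kept[target] = true
	for _, d := range deps[target] {
		keep(d, kept) // a target pulls in everything it depends on
	}
}

func main() {
	kept := map[string]bool{}
	keep("aws_instance.a", kept)
	fmt.Println(kept) // aws_instance.a and aws_vpc.main survive; output.ip is pruned
}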
-// TargetsTransformer is a GraphTransformer that, when the user specifies a
-// list of resources to target, limits the graph to only those resources and
-// their dependencies.
-type TargetsTransformer struct {
-	// List of targeted resource names specified by the user
-	Targets []addrs.Targetable
-
-	// If set, the index portions of resource addresses will be ignored
-	// for comparison. This is used when transforming a graph where
-	// counted resources have not yet been expanded, since otherwise
-	// the unexpanded nodes (which never have indices) would not match.
-	IgnoreIndices bool
-
-	// Set to true when we're in a `terraform destroy` or a
-	// `terraform plan -destroy`
-	Destroy bool
-}
-
-func (t *TargetsTransformer) Transform(g *Graph) error {
-	if len(t.Targets) > 0 {
-		targetedNodes, err := t.selectTargetedNodes(g, t.Targets)
-		if err != nil {
-			return err
-		}
-
-		for _, v := range g.Vertices() {
-			removable := false
-			if _, ok := v.(GraphNodeConfigResource); ok {
-				removable = true
-			}
-
-			if vr, ok := v.(RemovableIfNotTargeted); ok {
-				removable = vr.RemoveIfNotTargeted()
-			}
-
-			if removable && !targetedNodes.Include(v) {
-				log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
-				g.Remove(v)
-			}
-		}
-	}
-
-	return nil
-}
-
-// Returns a set of targeted nodes. A targeted node is either addressed
-// directly, addressed indirectly via its container, or is a dependency of a
-// targeted node. Destroy mode keeps dependents instead of dependencies.
-func (t *TargetsTransformer) selectTargetedNodes(g *Graph, addrs []addrs.Targetable) (dag.Set, error) {
-	targetedNodes := make(dag.Set)
-
-	vertices := g.Vertices()
-
-	for _, v := range vertices {
-		if t.nodeIsTarget(v, addrs) {
-			targetedNodes.Add(v)
-
-			// We inform nodes that ask about the list of targets, which helps
-			// nodes that need to dynamically expand. Note that this only occurs
-			// for nodes that are already directly targeted.
-			if tn, ok := v.(GraphNodeTargetable); ok {
-				tn.SetTargets(addrs)
-			}
-
-			var deps dag.Set
-			var err error
-			if t.Destroy {
-				deps, err = g.Descendents(v)
-			} else {
-				deps, err = g.Ancestors(v)
-			}
-			if err != nil {
-				return nil, err
-			}
-
-			for _, d := range deps {
-				targetedNodes.Add(d)
-			}
-		}
-	}
-	return t.addDependencies(targetedNodes, g)
-}
-
-func (t *TargetsTransformer) addDependencies(targetedNodes dag.Set, g *Graph) (dag.Set, error) {
-	// Handle nodes that need to be included if their dependencies are included.
-	// This requires multiple passes since we need to catch transitive
-	// dependencies if and only if they are via other nodes that also
-	// support TargetDownstream. For example:
-	// output -> output -> targeted-resource: both outputs need to be targeted
-	// output -> non-targeted-resource -> targeted-resource: output not targeted
-	//
-	// We'll keep looping until we stop targeting more nodes.
-	queue := targetedNodes.List()
-	for len(queue) > 0 {
-		vertices := queue
-		queue = nil // ready to append for next iteration if necessary
-		for _, v := range vertices {
-			// providers don't cause transitive dependencies, so don't target
-			// downstream from them.
-			if _, ok := v.(GraphNodeProvider); ok {
-				continue
-			}
-
-			dependers := g.UpEdges(v)
-			if dependers == nil {
-				// indicates that there are no up edges for this node, so
-				// we have nothing to do here.
- continue - } - - dependers = dependers.Filter(func(dv interface{}) bool { - _, ok := dv.(GraphNodeTargetDownstream) - return ok - }) - - if dependers.Len() == 0 { - continue - } - - for _, dv := range dependers { - if targetedNodes.Include(dv) { - // Already present, so nothing to do - continue - } - - // We'll give the node some information about what it's - // depending on in case that informs its decision about whether - // it is safe to be targeted. - deps := g.DownEdges(v) - - depsTargeted := deps.Intersection(targetedNodes) - depsUntargeted := deps.Difference(depsTargeted) - - if dv.(GraphNodeTargetDownstream).TargetDownstream(depsTargeted, depsUntargeted) { - targetedNodes.Add(dv) - // Need to visit this node on the next pass to see if it - // has any transitive dependers. - queue = append(queue, dv) - } - } - } - } - - return targetedNodes.Filter(func(dv interface{}) bool { - return filterPartialOutputs(dv, targetedNodes, g) - }), nil -} - -// Outputs may have been included transitively, but if any of their -// dependencies have been pruned they won't be resolvable. -// If nothing depends on the output, and the output is missing any -// dependencies, remove it from the graph. -// This essentially maintains the previous behavior where interpolation in -// outputs would fail silently, but can now surface errors where the output -// is required. -func filterPartialOutputs(v interface{}, targetedNodes dag.Set, g *Graph) bool { - // should this just be done with TargetDownstream? - if _, ok := v.(*NodeApplyableOutput); !ok { - return true - } - - dependers := g.UpEdges(v) - for _, d := range dependers { - if _, ok := d.(*NodeCountBoundary); ok { - continue - } - - if !targetedNodes.Include(d) { - // this one is going to be removed, so it doesn't count - continue - } - - // as soon as we see a real dependency, we mark this as - // non-removable - return true - } - - depends := g.DownEdges(v) - - for _, d := range depends { - if !targetedNodes.Include(d) { - log.Printf("[WARN] %s missing targeted dependency %s, removing from the graph", - dag.VertexName(v), dag.VertexName(d)) - return false - } - } - return true -} - -func (t *TargetsTransformer) nodeIsTarget(v dag.Vertex, targets []addrs.Targetable) bool { - var vertexAddr addrs.Targetable - switch r := v.(type) { - case GraphNodeResourceInstance: - vertexAddr = r.ResourceInstanceAddr() - case GraphNodeConfigResource: - vertexAddr = r.ResourceAddr() - default: - // Only resource and resource instance nodes can be targeted. - return false - } - - for _, targetAddr := range targets { - if t.IgnoreIndices { - // If we're ignoring indices then we'll convert any resource instance - // addresses into resource addresses. We don't need to convert - // vertexAddr because instance addresses are contained within - // their associated resources, and so .TargetContains will take - // care of this for us. - if instance, isInstance := targetAddr.(addrs.AbsResourceInstance); isInstance { - targetAddr = instance.ContainingResource() - } - } - if targetAddr.TargetContains(vertexAddr) { - return true - } - } - - return false -} - -// RemovableIfNotTargeted is a special interface for graph nodes that -// aren't directly addressable, but need to be removed from the graph when they -// are not targeted. (Nodes that are not directly targeted end up in the set of -// targeted nodes because something that _is_ targeted depends on them.) 
The
-// initial use case for this interface is GraphNodeConfigVariable, which was
-// having trouble interpolating for module variables in targeted scenarios that
-// filtered out the resource node being referenced.
-type RemovableIfNotTargeted interface {
-	RemoveIfNotTargeted() bool
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
deleted file mode 100644
index 21842789..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package terraform
-
-// TransitiveReductionTransformer is a GraphTransformer that finds the
-// transitive reduction of the graph. For a definition of transitive
-// reduction, see Wikipedia.
-type TransitiveReductionTransformer struct{}
-
-func (t *TransitiveReductionTransformer) Transform(g *Graph) error {
-	// If the graph isn't valid, skip the transitive reduction.
-	// We don't error here because Terraform itself handles graph
-	// validation in a better way, or we assume it does.
-	if err := g.Validate(); err != nil {
-		return nil
-	}
-
-	// Do it
-	g.TransitiveReduction()
-
-	return nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
deleted file mode 100644
index 05daa513..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package terraform
-
-import (
-	"github.com/hashicorp/terraform/addrs"
-	"github.com/hashicorp/terraform/configs"
-)
-
-// RootVariableTransformer is a GraphTransformer that adds all the root
-// variables to the graph.
-//
-// Root variables are currently no-ops but they must be added to the
-// graph since downstream things that depend on them must be able to
-// reach them.
-type RootVariableTransformer struct {
-	Config *configs.Config
-}
-
-func (t *RootVariableTransformer) Transform(g *Graph) error {
-	// We can have no variables if we have no config.
-	if t.Config == nil {
-		return nil
-	}
-
-	// We're only considering root module variables here, since child
-	// module variables are handled by ModuleVariableTransformer.
-	vars := t.Config.Module.Variables
-
-	// Add all variables here
-	for _, v := range vars {
-		node := &NodeRootVariable{
-			Addr: addrs.InputVariable{
-				Name: v.Name,
-			},
-			Config: v,
-		}
-		g.Add(node)
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
deleted file mode 100644
index 6b1293fc..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package terraform
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/terraform/dag"
-)
-
-// VertexTransformer is a GraphTransformer that transforms vertices
-// using the GraphVertexTransformers. The Transforms are run in sequential
-// order. If a transform replaces a vertex then the next transform will see
-// the new vertex.
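A stripped-down sketch of the sequential-transform contract just described, where a replacement must be rebound so later transforms observe it; the step type and the labels are invented:

package main

import "fmt"

// step either returns the vertex unchanged or a replacement; later
// steps must see the replacement, so the loop variable is rebound.
type step func(string) string

func main() {
	transforms := []step{
		func(v string) string { return v + ":expanded" },
		func(v string) string { return v + ":validated" },
	}

	v := "aws_instance.a"
	for _, t := range transforms {
		newV := t(v)
		if newV == v {
			continue
		}
		v = newV // rebind so the next transform sees the new vertex
	}
	fmt.Println(v) // aws_instance.a:expanded:validated
}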
-type VertexTransformer struct { - Transforms []GraphVertexTransformer -} - -func (t *VertexTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - for _, vt := range t.Transforms { - newV, err := vt.Transform(v) - if err != nil { - return err - } - - // If the vertex didn't change, then don't do anything more - if newV == v { - continue - } - - // Vertex changed, replace it within the graph - if ok := g.Replace(v, newV); !ok { - // This should never happen, big problem - return fmt.Errorf( - "Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v", - dag.VertexName(v), dag.VertexName(newV), v, newV) - } - - // Replace v so that future transforms use the proper vertex - v = newV - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go deleted file mode 100644 index f6790d9e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go +++ /dev/null @@ -1,28 +0,0 @@ -package terraform - -import "context" - -// UIInput is the interface that must be implemented to ask for input -// from this user. This should forward the request to wherever the user -// inputs things to ask for values. -type UIInput interface { - Input(context.Context, *InputOpts) (string, error) -} - -// InputOpts are options for asking for input. -type InputOpts struct { - // Id is a unique ID for the question being asked that might be - // used for logging or to look up a prior answered question. - Id string - - // Query is a human-friendly question for inputting this value. - Query string - - // Description is a description about what this option is. Be wary - // that this will probably be in a terminal so split lines as you see - // necessary. - Description string - - // Default will be the value returned if no data is entered. - Default string -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go deleted file mode 100644 index e2d9c384..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go +++ /dev/null @@ -1,25 +0,0 @@ -package terraform - -import "context" - -// MockUIInput is an implementation of UIInput that can be used for tests. -type MockUIInput struct { - InputCalled bool - InputOpts *InputOpts - InputReturnMap map[string]string - InputReturnString string - InputReturnError error - InputFn func(*InputOpts) (string, error) -} - -func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { - i.InputCalled = true - i.InputOpts = opts - if i.InputFn != nil { - return i.InputFn(opts) - } - if i.InputReturnMap != nil { - return i.InputReturnMap[opts.Id], i.InputReturnError - } - return i.InputReturnString, i.InputReturnError -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go deleted file mode 100644 index b5d32b1e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -import ( - "context" - "fmt" -) - -// PrefixUIInput is an implementation of UIInput that prefixes the ID -// with a string, allowing queries to be namespaced. 
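The PrefixUIInput type below is a small decorator. Here is a self-contained sketch of the same pattern with an invented inputter interface; the real UIInput takes *InputOpts rather than plain strings:

package main

import (
	"context"
	"fmt"
)

type inputter interface {
	Input(ctx context.Context, id, query string) (string, error)
}

// echoInput is a stand-in backend that just reports what it was asked.
type echoInput struct{}

func (echoInput) Input(_ context.Context, id, query string) (string, error) {
	return fmt.Sprintf("asked %q: %s", id, query), nil
}

// prefixInput decorates another inputter, namespacing every question ID.
type prefixInput struct {
	idPrefix string
	wrapped  inputter
}

func (p prefixInput) Input(ctx context.Context, id, query string) (string, error) {
	return p.wrapped.Input(ctx, p.idPrefix+"."+id, query)
}

func main() {
	in := prefixInput{idPrefix: "provider.aws", wrapped: echoInput{}}
	out, _ := in.Input(context.Background(), "region", "Which region?")
	fmt.Println(out) // asked "provider.aws.region": Which region?
}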
-type PrefixUIInput struct { - IdPrefix string - QueryPrefix string - UIInput UIInput -} - -func (i *PrefixUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { - opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) - opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) - return i.UIInput.Input(ctx, opts) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go deleted file mode 100644 index 84427c63..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output.go +++ /dev/null @@ -1,7 +0,0 @@ -package terraform - -// UIOutput is the interface that must be implemented to output -// data to the end user. -type UIOutput interface { - Output(string) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go deleted file mode 100644 index 135a91c5..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -type CallbackUIOutput struct { - OutputFn func(string) -} - -func (o *CallbackUIOutput) Output(v string) { - o.OutputFn(v) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go deleted file mode 100644 index d828c921..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go +++ /dev/null @@ -1,21 +0,0 @@ -package terraform - -import "sync" - -// MockUIOutput is an implementation of UIOutput that can be used for tests. -type MockUIOutput struct { - sync.Mutex - OutputCalled bool - OutputMessage string - OutputFn func(string) -} - -func (o *MockUIOutput) Output(v string) { - o.Lock() - defer o.Unlock() - o.OutputCalled = true - o.OutputMessage = v - if o.OutputFn != nil { - o.OutputFn(v) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go deleted file mode 100644 index fff964f4..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" -) - -// ProvisionerUIOutput is an implementation of UIOutput that calls a hook -// for the output so that the hooks can handle it. 
-type ProvisionerUIOutput struct { - InstanceAddr addrs.AbsResourceInstance - ProvisionerType string - Hooks []Hook -} - -func (o *ProvisionerUIOutput) Output(msg string) { - for _, h := range o.Hooks { - h.ProvisionOutput(o.InstanceAddr, o.ProvisionerType, msg) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/user_agent.go b/vendor/github.com/hashicorp/terraform/terraform/user_agent.go deleted file mode 100644 index 97f1ec1f..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/user_agent.go +++ /dev/null @@ -1,12 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/httpclient" -) - -// Generate a UserAgent string -// -// Deprecated: Use httpclient.UserAgent(version) instead -func UserAgentString() string { - return httpclient.UserAgentString() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/util.go b/vendor/github.com/hashicorp/terraform/terraform/util.go deleted file mode 100644 index 7966b58d..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/util.go +++ /dev/null @@ -1,75 +0,0 @@ -package terraform - -import ( - "sort" -) - -// Semaphore is a wrapper around a channel to provide -// utility methods to clarify that we are treating the -// channel as a semaphore -type Semaphore chan struct{} - -// NewSemaphore creates a semaphore that allows up -// to a given limit of simultaneous acquisitions -func NewSemaphore(n int) Semaphore { - if n <= 0 { - panic("semaphore with limit <=0") - } - ch := make(chan struct{}, n) - return Semaphore(ch) -} - -// Acquire is used to acquire an available slot. -// Blocks until available. -func (s Semaphore) Acquire() { - s <- struct{}{} -} - -// TryAcquire is used to do a non-blocking acquire. -// Returns a bool indicating success -func (s Semaphore) TryAcquire() bool { - select { - case s <- struct{}{}: - return true - default: - return false - } -} - -// Release is used to return a slot. Acquire must -// be called as a pre-condition. -func (s Semaphore) Release() { - select { - case <-s: - default: - panic("release without an acquire") - } -} - -// strSliceContains checks if a given string is contained in a slice -// When anybody asks why Go needs generics, here you go. -func strSliceContains(haystack []string, needle string) bool { - for _, s := range haystack { - if s == needle { - return true - } - } - return false -} - -// deduplicate a slice of strings -func uniqueStrings(s []string) []string { - if len(s) < 2 { - return s - } - - sort.Strings(s) - result := make([]string, 1, len(s)) - result[0] = s[0] - for i := 1; i < len(s); i++ { - if s[i] != result[len(result)-1] { - result = append(result, s[i]) - } - } - return result -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go deleted file mode 100644 index 627593d7..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go +++ /dev/null @@ -1,59 +0,0 @@ -// Code generated by "stringer -type ValueSourceType"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[ValueFromUnknown-0] - _ = x[ValueFromConfig-67] - _ = x[ValueFromAutoFile-70] - _ = x[ValueFromNamedFile-78] - _ = x[ValueFromCLIArg-65] - _ = x[ValueFromEnvVar-69] - _ = x[ValueFromInput-73] - _ = x[ValueFromPlan-80] - _ = x[ValueFromCaller-83] -} - -const ( - _ValueSourceType_name_0 = "ValueFromUnknown" - _ValueSourceType_name_1 = "ValueFromCLIArg" - _ValueSourceType_name_2 = "ValueFromConfig" - _ValueSourceType_name_3 = "ValueFromEnvVarValueFromAutoFile" - _ValueSourceType_name_4 = "ValueFromInput" - _ValueSourceType_name_5 = "ValueFromNamedFile" - _ValueSourceType_name_6 = "ValueFromPlan" - _ValueSourceType_name_7 = "ValueFromCaller" -) - -var ( - _ValueSourceType_index_3 = [...]uint8{0, 15, 32} -) - -func (i ValueSourceType) String() string { - switch { - case i == 0: - return _ValueSourceType_name_0 - case i == 65: - return _ValueSourceType_name_1 - case i == 67: - return _ValueSourceType_name_2 - case 69 <= i && i <= 70: - i -= 69 - return _ValueSourceType_name_3[_ValueSourceType_index_3[i]:_ValueSourceType_index_3[i+1]] - case i == 73: - return _ValueSourceType_name_4 - case i == 78: - return _ValueSourceType_name_5 - case i == 80: - return _ValueSourceType_name_6 - case i == 83: - return _ValueSourceType_name_7 - default: - return "ValueSourceType(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go deleted file mode 100644 index 14f6a3cc..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/variables.go +++ /dev/null @@ -1,313 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/tfdiags" -) - -// InputValue represents a value for a variable in the root module, provided -// as part of the definition of an operation. -type InputValue struct { - Value cty.Value - SourceType ValueSourceType - - // SourceRange provides source location information for values whose - // SourceType is either ValueFromConfig or ValueFromFile. It is not - // populated for other source types, and so should not be used. - SourceRange tfdiags.SourceRange -} - -// ValueSourceType describes what broad category of source location provided -// a particular value. -type ValueSourceType rune - -const ( - // ValueFromUnknown is the zero value of ValueSourceType and is not valid. - ValueFromUnknown ValueSourceType = 0 - - // ValueFromConfig indicates that a value came from a .tf or .tf.json file, - // e.g. the default value defined for a variable. - ValueFromConfig ValueSourceType = 'C' - - // ValueFromAutoFile indicates that a value came from a "values file", like - // a .tfvars file, that was implicitly loaded by naming convention. - ValueFromAutoFile ValueSourceType = 'F' - - // ValueFromNamedFile indicates that a value came from a named "values file", - // like a .tfvars file, that was passed explicitly on the command line (e.g. - // -var-file=foo.tfvars). - ValueFromNamedFile ValueSourceType = 'N' - - // ValueFromCLIArg indicates that the value was provided directly in - // a CLI argument. The name of this argument is not recorded and so it must - // be inferred from context. - ValueFromCLIArg ValueSourceType = 'A' - - // ValueFromEnvVar indicates that the value was provided via an environment - // variable. 
The name of the variable is not recorded and so it must be
-	// inferred from context.
-	ValueFromEnvVar ValueSourceType = 'E'
-
-	// ValueFromInput indicates that the value was provided at an interactive
-	// input prompt.
-	ValueFromInput ValueSourceType = 'I'
-
-	// ValueFromPlan indicates that the value was retrieved from a stored plan.
-	ValueFromPlan ValueSourceType = 'P'
-
-	// ValueFromCaller indicates that the value was explicitly overridden by
-	// a caller to Context.SetVariable after the context was constructed.
-	ValueFromCaller ValueSourceType = 'S'
-)
-
-func (v *InputValue) GoString() string {
-	if (v.SourceRange != tfdiags.SourceRange{}) {
-		return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v, SourceRange: %#v}", v.Value, v.SourceType, v.SourceRange)
-	} else {
-		return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v}", v.Value, v.SourceType)
-	}
-}
-
-func (v ValueSourceType) GoString() string {
-	return fmt.Sprintf("terraform.%s", v)
-}
-
-//go:generate go run golang.org/x/tools/cmd/stringer -type ValueSourceType
-
-// InputValues is a map of InputValue instances.
-type InputValues map[string]*InputValue
-
-// InputValuesFromCaller turns the given map of naked values into an
-// InputValues that attributes each value to "a caller", using the source
-// type ValueFromCaller. This is primarily useful for testing purposes.
-//
-// This should not be used as a general way to convert map[string]cty.Value
-// into InputValues, since in most real cases we want to set a suitable
-// other SourceType and possibly SourceRange value.
-func InputValuesFromCaller(vals map[string]cty.Value) InputValues {
-	ret := make(InputValues, len(vals))
-	for k, v := range vals {
-		ret[k] = &InputValue{
-			Value:      v,
-			SourceType: ValueFromCaller,
-		}
-	}
-	return ret
-}
-
-// Override merges the given value maps with the receiver, overriding any
-// conflicting keys so that the latest definition wins.
-func (vv InputValues) Override(others ...InputValues) InputValues {
-	// FIXME: This should check to see if any of the values are maps and
-	// merge them if so, in order to preserve the behavior from prior to
-	// Terraform 0.12.
-	ret := make(InputValues)
-	for k, v := range vv {
-		ret[k] = v
-	}
-	for _, other := range others {
-		for k, v := range other {
-			ret[k] = v
-		}
-	}
-	return ret
-}
-
-// JustValues returns a map that just includes the values, discarding the
-// source information.
-func (vv InputValues) JustValues() map[string]cty.Value {
-	ret := make(map[string]cty.Value, len(vv))
-	for k, v := range vv {
-		ret[k] = v.Value
-	}
-	return ret
-}
-
-// DefaultVariableValues returns an InputValues map representing the default
-// values specified for variables in the given configuration map.
-func DefaultVariableValues(configs map[string]*configs.Variable) InputValues {
-	ret := make(InputValues)
-	for k, c := range configs {
-		if c.Default == cty.NilVal {
-			continue
-		}
-		ret[k] = &InputValue{
-			Value:       c.Default,
-			SourceType:  ValueFromConfig,
-			SourceRange: tfdiags.SourceRangeFromHCL(c.DeclRange),
-		}
-	}
-	return ret
-}
-
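The Override behavior above ("latest definition wins") boils down to ordered map merging; a sketch with plain strings standing in for InputValue, and invented variable names:

package main

import "fmt"

func main() {
	// Later maps win on conflicting keys, matching the Override semantics:
	// defaults first, then file values, then CLI values.
	defaults := map[string]string{"region": "us-east-1", "env": "dev"}
	cli := map[string]string{"region": "eu-west-1"}

	merged := map[string]string{}
	for _, m := range []map[string]string{defaults, cli} {
		for k, v := range m {
			merged[k] = v
		}
	}
	fmt.Println(merged["region"], merged["env"]) // eu-west-1 dev
}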
-// SameValues returns true if the given InputValues has the same values as
-// the receiver, disregarding the source types and source ranges.
-//
-// Values are compared using the cty "RawEquals" method, which means that
-// unknown values can be considered equal to one another if they are of the
-// same type.
-func (vv InputValues) SameValues(other InputValues) bool {
-	if len(vv) != len(other) {
-		return false
-	}
-
-	for k, v := range vv {
-		ov, exists := other[k]
-		if !exists {
-			return false
-		}
-		if !v.Value.RawEquals(ov.Value) {
-			return false
-		}
-	}
-
-	return true
-}
-
-// HasValues returns true if the receiver has the same values as in the given
-// map, disregarding the source types and source ranges.
-//
-// Values are compared using the cty "RawEquals" method, which means that
-// unknown values can be considered equal to one another if they are of the
-// same type.
-func (vv InputValues) HasValues(vals map[string]cty.Value) bool {
-	if len(vv) != len(vals) {
-		return false
-	}
-
-	for k, v := range vv {
-		oVal, exists := vals[k]
-		if !exists {
-			return false
-		}
-		if !v.Value.RawEquals(oVal) {
-			return false
-		}
-	}
-
-	return true
-}
-
-// Identical returns true if the given InputValues has the same values,
-// source types, and source ranges as the receiver.
-//
-// Values are compared using the cty "RawEquals" method, which means that
-// unknown values can be considered equal to one another if they are of the
-// same type.
-//
-// This method is primarily for testing. For most practical purposes, it's
-// better to use SameValues or HasValues.
-func (vv InputValues) Identical(other InputValues) bool {
-	if len(vv) != len(other) {
-		return false
-	}
-
-	for k, v := range vv {
-		ov, exists := other[k]
-		if !exists {
-			return false
-		}
-		if !v.Value.RawEquals(ov.Value) {
-			return false
-		}
-		if v.SourceType != ov.SourceType {
-			return false
-		}
-		if v.SourceRange != ov.SourceRange {
-			return false
-		}
-	}
-
-	return true
-}
-
-// checkInputVariables ensures that variable values supplied at the UI conform
-// to their corresponding declarations in configuration.
-//
-// The set of values is considered valid only if the returned diagnostics
-// does not contain errors. A valid set of values may still produce warnings,
-// which should be returned to the user.
-func checkInputVariables(vcs map[string]*configs.Variable, vs InputValues) tfdiags.Diagnostics {
-	var diags tfdiags.Diagnostics
-
-	for name, vc := range vcs {
-		val, isSet := vs[name]
-		if !isSet {
-			// Always an error, since the caller should already have included
-			// default values from the configuration in the values map.
-			diags = diags.Append(tfdiags.Sourceless(
-				tfdiags.Error,
-				"Unassigned variable",
-				fmt.Sprintf("The input variable %q has not been assigned a value. This is a bug in Terraform; please report it in a GitHub issue.", name),
-			))
-			continue
-		}
-
-		wantType := vc.Type
-
-		// A given value is valid if it can convert to the desired type.
-		_, err := convert.Convert(val.Value, wantType)
-		if err != nil {
-			switch val.SourceType {
-			case ValueFromConfig, ValueFromAutoFile, ValueFromNamedFile:
-				// We have source location information for these.
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid value for input variable", - Detail: fmt.Sprintf("The given value is not valid for variable %q: %s.", name, err), - Subject: val.SourceRange.ToHCL().Ptr(), - }) - case ValueFromEnvVar: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The environment variable TF_VAR_%s does not contain a valid value for variable %q: %s.", name, name, err), - )) - case ValueFromCLIArg: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The argument -var=\"%s=...\" does not contain a valid value for variable %q: %s.", name, name, err), - )) - case ValueFromInput: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The value entered for variable %q is not valid: %s.", name, err), - )) - default: - // The above gets us good coverage for the situations users - // are likely to encounter with their own inputs. The other - // cases are generally implementation bugs, so we'll just - // use a generic error for these. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The value provided for variable %q is not valid: %s.", name, err), - )) - } - } - } - - // Check for any variables that are assigned without being configured. - // This is always an implementation error in the caller, because we - // expect undefined variables to be caught during context construction - // where there is better context to report it well. - for name := range vs { - if _, defined := vcs[name]; !defined { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Value assigned to undeclared variable", - fmt.Sprintf("A value was assigned to an undeclared input variable %q.", name), - )) - } - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go deleted file mode 100644 index 0caeca0a..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/version.go +++ /dev/null @@ -1,10 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/version" -) - -// Deprecated: Providers should use schema.Provider.TerraformVersion instead -func VersionString() string { - return version.String() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go deleted file mode 100644 index ba9af1d1..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/version_required.go +++ /dev/null @@ -1,62 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/terraform/configs" - - tfversion "github.com/hashicorp/terraform/version" -) - -// CheckCoreVersionRequirements visits each of the modules in the given -// configuration tree and verifies that any given Core version constraints -// match with the version of Terraform Core that is being used. -// -// The returned diagnostics will contain errors if any constraints do not match. -// The returned diagnostics might also return warnings, which should be -// displayed to the user. 
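checkInputVariables above leans on cty's conversion rules to decide validity. This standalone snippet exercises convert.Convert the same way, using invented values; it assumes the go-cty module is available:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// A supplied value is acceptable for a variable if it can convert to
	// the declared type: "5" converts to number, "five" does not.
	v, err := convert.Convert(cty.StringVal("5"), cty.Number)
	fmt.Println(v.AsBigFloat(), err) // 5 <nil>

	_, err = convert.Convert(cty.StringVal("five"), cty.Number)
	fmt.Println(err != nil) // true
}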
-func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics { - if config == nil { - return nil - } - - var diags tfdiags.Diagnostics - module := config.Module - - for _, constraint := range module.CoreVersionConstraints { - if !constraint.Required.Check(tfversion.SemVer) { - switch { - case len(config.Path) == 0: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported Terraform Core version", - Detail: fmt.Sprintf( - "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", - tfversion.String(), - ), - Subject: &constraint.DeclRange, - }) - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported Terraform Core version", - Detail: fmt.Sprintf( - "Module %s (from %s) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", - config.Path, config.SourceAddr, tfversion.String(), - ), - Subject: &constraint.DeclRange, - }) - } - } - } - - for _, c := range config.Children { - childDiags := CheckCoreVersionRequirements(c) - diags = diags.Append(childDiags) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go deleted file mode 100644 index 0666aa5f..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go +++ /dev/null @@ -1,31 +0,0 @@ -// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[walkInvalid-0] - _ = x[walkApply-1] - _ = x[walkPlan-2] - _ = x[walkPlanDestroy-3] - _ = x[walkRefresh-4] - _ = x[walkValidate-5] - _ = x[walkDestroy-6] - _ = x[walkImport-7] - _ = x[walkEval-8] -} - -const _walkOperation_name = "walkInvalidwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImportwalkEval" - -var _walkOperation_index = [...]uint8{0, 11, 20, 28, 43, 54, 66, 77, 87, 95} - -func (i walkOperation) String() string { - if i >= walkOperation(len(_walkOperation_index)-1) { - return "walkOperation(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go b/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go deleted file mode 100644 index 8e41f46e..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go +++ /dev/null @@ -1,68 +0,0 @@ -package tfdiags - -import ( - "bytes" - "fmt" - "strconv" - - "github.com/zclconf/go-cty/cty" -) - -// FormatCtyPath is a helper function to produce a user-friendly string -// representation of a cty.Path. The result uses a syntax similar to the -// HCL expression language in the hope of it being familiar to users. 
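To make FormatCtyPath's rendering concrete before its definition below, here is a reduced version handling only attribute steps and string keys; the real function also covers numbers, nulls, and unknown values:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// render is a cut-down FormatCtyPath: attribute steps become ".name" and
// string-keyed index steps become ["key"]; other key types are omitted.
func render(path cty.Path) string {
	var out string
	for _, step := range path {
		switch s := step.(type) {
		case cty.GetAttrStep:
			out += "." + s.Name
		case cty.IndexStep:
			out += fmt.Sprintf("[%q]", s.Key.AsString())
		}
	}
	return out
}

func main() {
	path := cty.Path{
		cty.GetAttrStep{Name: "tags"},
		cty.IndexStep{Key: cty.StringVal("env")},
	}
	fmt.Println(render(path)) // .tags["env"]
}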
-func FormatCtyPath(path cty.Path) string {
-	var buf bytes.Buffer
-	for _, step := range path {
-		switch ts := step.(type) {
-		case cty.GetAttrStep:
-			fmt.Fprintf(&buf, ".%s", ts.Name)
-		case cty.IndexStep:
-			buf.WriteByte('[')
-			key := ts.Key
-			keyTy := key.Type()
-			switch {
-			case key.IsNull():
-				buf.WriteString("null")
-			case !key.IsKnown():
-				buf.WriteString("(not yet known)")
-			case keyTy == cty.Number:
-				bf := key.AsBigFloat()
-				buf.WriteString(bf.Text('g', -1))
-			case keyTy == cty.String:
-				buf.WriteString(strconv.Quote(key.AsString()))
-			default:
-				buf.WriteString("...")
-			}
-			buf.WriteByte(']')
-		}
-	}
-	return buf.String()
-}
-
-// FormatError is a helper function to produce a user-friendly string
-// representation of certain special error types that we might want to
-// include in diagnostic messages.
-//
-// This currently has special behavior only for cty.PathError, where a
-// non-empty path is rendered in an HCL-like syntax as context.
-func FormatError(err error) string {
-	perr, ok := err.(cty.PathError)
-	if !ok || len(perr.Path) == 0 {
-		return err.Error()
-	}
-
-	return fmt.Sprintf("%s: %s", FormatCtyPath(perr.Path), perr.Error())
-}
-
-// FormatErrorPrefixed is like FormatError except that it presents any path
-// information after the given prefix string, which is assumed to contain
-// an HCL syntax representation of the value that errors are relative to.
-func FormatErrorPrefixed(err error, prefix string) string {
-	perr, ok := err.(cty.PathError)
-	if !ok || len(perr.Path) == 0 {
-		return fmt.Sprintf("%s: %s", prefix, err.Error())
-	}
-
-	return fmt.Sprintf("%s%s: %s", prefix, FormatCtyPath(perr.Path), perr.Error())
-}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/consolidate_warnings.go b/vendor/github.com/hashicorp/terraform/tfdiags/consolidate_warnings.go
deleted file mode 100644
index 06f3d52c..00000000
--- a/vendor/github.com/hashicorp/terraform/tfdiags/consolidate_warnings.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package tfdiags
-
-import "fmt"

-// ConsolidateWarnings checks if there is an unreasonable number of warnings
-// with the same summary in the receiver and, if so, returns a new diagnostics
-// with some of those warnings consolidated into a single warning in order
-// to reduce the verbosity of the output.
-//
-// This mechanism is here primarily for diagnostics printed out at the CLI. In
-// other contexts it is likely better to just return the warnings directly,
-// particularly if they are going to be interpreted by software rather than
-// by a human reader.
-//
-// The returned slice always has a separate backing array from the receiver,
-// but some diagnostic values themselves might be shared.
-//
-// The definition of "unreasonable" is given as the threshold argument. At most
-// that many warnings with the same summary will be shown.
-func (diags Diagnostics) ConsolidateWarnings(threshold int) Diagnostics {
-	if len(diags) == 0 {
-		return nil
-	}
-
-	newDiags := make(Diagnostics, 0, len(diags))
-
-	// We'll track how many times we've seen each warning summary so we can
-	// decide when to start consolidating. Once we _have_ started consolidating,
-	// we'll also track the object representing the consolidated warning
-	// so we can continue appending to it.
- warningStats := make(map[string]int) - warningGroups := make(map[string]*warningGroup) - - for _, diag := range diags { - severity := diag.Severity() - if severity != Warning || diag.Source().Subject == nil { - // Only warnings can get special treatment, and we only - // consolidate warnings that have source locations because - // our primary goal here is to deal with the situation where - // some configuration language feature is producing a warning - // each time it's used across a potentially-large config. - newDiags = newDiags.Append(diag) - continue - } - - desc := diag.Description() - summary := desc.Summary - if g, ok := warningGroups[summary]; ok { - // We're already grouping this one, so we'll just continue it. - g.Append(diag) - continue - } - - warningStats[summary]++ - if warningStats[summary] == threshold { - // Initially creating the group doesn't really change anything - // visibly in the result, since a group with only one warning - // is just a passthrough anyway, but once we do this any additional - // warnings with the same summary will get appended to this group. - g := &warningGroup{} - newDiags = newDiags.Append(g) - warningGroups[summary] = g - g.Append(diag) - continue - } - - // If this warning is not consolidating yet then we'll just append - // it directly. - newDiags = newDiags.Append(diag) - } - - return newDiags -} - -// A warningGroup is one or more warning diagnostics grouped together for -// UI consolidation purposes. -// -// A warningGroup with only one diagnostic in it is just a passthrough for -// that one diagnostic. If it has more than one then it will behave mostly -// like the first one but its detail message will include an additional -// sentence mentioning the consolidation. A warningGroup with no diagnostics -// at all is invalid and will panic when used. -type warningGroup struct { - Warnings Diagnostics -} - -var _ Diagnostic = (*warningGroup)(nil) - -func (wg *warningGroup) Severity() Severity { - return wg.Warnings[0].Severity() -} - -func (wg *warningGroup) Description() Description { - desc := wg.Warnings[0].Description() - if len(wg.Warnings) < 2 { - return desc - } - extraCount := len(wg.Warnings) - 1 - var msg string - switch extraCount { - case 1: - msg = "(and one more similar warning elsewhere)" - default: - msg = fmt.Sprintf("(and %d more similar warnings elsewhere)", extraCount) - } - if desc.Detail != "" { - desc.Detail = desc.Detail + "\n\n" + msg - } else { - desc.Detail = msg - } - return desc -} - -func (wg *warningGroup) Source() Source { - return wg.Warnings[0].Source() -} - -func (wg *warningGroup) FromExpr() *FromExpr { - return wg.Warnings[0].FromExpr() -} - -func (wg *warningGroup) Append(diag Diagnostic) { - if diag.Severity() != Warning { - panic("can't append a non-warning diagnostic to a warningGroup") - } - wg.Warnings = append(wg.Warnings, diag) -} - -// WarningGroupSourceRanges can be used in conjunction with -// Diagnostics.ConsolidateWarnings to recover the full set of original source -// locations from a consolidated warning. -// -// For convenience, this function accepts any diagnostic and will just return -// the single Source value from any diagnostic that isn't a warning group. 
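The threshold bookkeeping in ConsolidateWarnings can be hard to follow interleaved with the Diagnostics plumbing. This toy models just the counting, with invented summaries and a threshold of 2; the real function also requires a warning to carry a source location before it will group it:

package main

import "fmt"

func main() {
	summaries := []string{
		"deprecated syntax", "deprecated syntax",
		"deprecated syntax", "empty provider block",
	}
	const threshold = 2

	counts := map[string]int{}  // occurrences seen per summary
	grouped := map[string]int{} // warnings folded into a group per summary
	var shown []string          // what would actually be printed
	for _, s := range summaries {
		counts[s]++
		switch {
		case counts[s] < threshold:
			shown = append(shown, s) // below threshold: show individually
		case counts[s] == threshold:
			// at threshold: emit one group entry that absorbs this and
			// all later warnings with the same summary
			shown = append(shown, s+" (and similar warnings elsewhere)")
			grouped[s]++
		default:
			grouped[s]++ // silently absorbed by the existing group
		}
	}
	fmt.Println(shown)
	fmt.Println(grouped) // map[deprecated syntax:2]
}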
-func WarningGroupSourceRanges(diag Diagnostic) []Source {
-	wg, ok := diag.(*warningGroup)
-	if !ok {
-		return []Source{diag.Source()}
-	}
-
-	ret := make([]Source, len(wg.Warnings))
-	for i, wrappedDiag := range wg.Warnings {
-		ret[i] = wrappedDiag.Source()
-	}
-	return ret
-}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go b/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go
deleted file mode 100644
index d55bc2f0..00000000
--- a/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go
+++ /dev/null
@@ -1,372 +0,0 @@
-package tfdiags
-
-import (
-	"github.com/hashicorp/hcl/v2"
-	"github.com/zclconf/go-cty/cty"
-	"github.com/zclconf/go-cty/cty/gocty"
-)
-
-// The "contextual" family of diagnostics are designed to allow separating
-// the detection of a problem from placing that problem in context. For
-// example, some code that is validating an object extracted from configuration
-// may not have access to the configuration that generated it, but can still
-// report problems within that object which the caller can then place in
-// context by calling InConfigBody on the returned diagnostics.
-//
-// When contextual diagnostics are used, the documentation for a method must
-// be very explicit about what context is implied for any diagnostics returned,
-// to help ensure the expected result.
-
-// contextualFromConfigBody is an interface type implemented by diagnostic
-// types that can elaborate themselves when given information about the
-// configuration body they are embedded in.
-//
-// Usually this entails extracting source location information in order to
-// populate the "Subject" range.
-type contextualFromConfigBody interface {
-	ElaborateFromConfigBody(hcl.Body) Diagnostic
-}
-
-// InConfigBody returns a copy of the receiver with any config-contextual
-// diagnostics elaborated in the context of the given body.
-func (d Diagnostics) InConfigBody(body hcl.Body) Diagnostics {
-	if len(d) == 0 {
-		return nil
-	}
-
-	ret := make(Diagnostics, len(d))
-	for i, srcDiag := range d {
-		if cd, isCD := srcDiag.(contextualFromConfigBody); isCD {
-			ret[i] = cd.ElaborateFromConfigBody(body)
-		} else {
-			ret[i] = srcDiag
-		}
-	}
-
-	return ret
-}
-
-// AttributeValue returns a diagnostic about an attribute value in an implied current
-// configuration context. This should be returned only from functions whose
-// interface specifies a clear configuration context that this will be
-// resolved in.
-//
-// The given path is relative to the implied configuration context. To describe
-// a top-level attribute, it should be a single-element cty.Path with a
-// cty.GetAttrStep. It's assumed that the path is returning into a structure
-// that would be produced by our conventions in the configschema package; it
-// may return unexpected results for structures that can't be represented by
-// configschema.
-//
-// Since mapping attribute paths back onto configuration is an imprecise
-// operation (e.g. dynamic block generation may cause the same block to be
-// evaluated multiple times) the diagnostic detail should include the attribute
-// name and other context required to help the user understand what is being
-// referenced in case the identified source range is not unique.
-//
-// The returned attribute will not have source location information until
-// context is applied to the containing diagnostics using diags.InConfigBody.
-// After context is applied, the source location is the value assigned to the -// named attribute, or the containing body's "missing item range" if no -// value is present. -func AttributeValue(severity Severity, summary, detail string, attrPath cty.Path) Diagnostic { - return &attributeDiagnostic{ - diagnosticBase: diagnosticBase{ - severity: severity, - summary: summary, - detail: detail, - }, - attrPath: attrPath, - } -} - -// GetAttribute extracts an attribute cty.Path from a diagnostic if it contains -// one. Normally this is not accessed directly, and instead the config body is -// added to the Diagnostic to create a more complete message for the user. In -// some cases however, we may want to know just the name of the attribute that -// generated the Diagnostic message. -// This returns a nil cty.Path if it does not exist in the Diagnostic. -func GetAttribute(d Diagnostic) cty.Path { - if d, ok := d.(*attributeDiagnostic); ok { - return d.attrPath - } - return nil -} - -type attributeDiagnostic struct { - diagnosticBase - attrPath cty.Path - subject *SourceRange // populated only after ElaborateFromConfigBody -} - -// ElaborateFromConfigBody finds the most accurate possible source location -// for a diagnostic's attribute path within the given body. -// -// Backing out from a path back to a source location is not always entirely -// possible because we lose some information in the decoding process, so -// if an exact position cannot be found then the returned diagnostic will -// refer to a position somewhere within the containing body, which is assumed -// to be better than no location at all. -// -// If possible it is generally better to report an error at a layer where -// source location information is still available, for more accuracy. This -// is not always possible due to system architecture, so this serves as a -// "best effort" fallback behavior for such situations. -func (d *attributeDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic { - if len(d.attrPath) < 1 { - // Should never happen, but we'll allow it rather than crashing. - return d - } - - if d.subject != nil { - // Don't modify an already-elaborated diagnostic. - return d - } - - ret := *d - - // This function will often end up re-decoding values that were already - // decoded by an earlier step. This is non-ideal but is architecturally - // more convenient than arranging for source location information to be - // propagated to every place in Terraform, and this happens only in the - // presence of errors where performance isn't a concern. - - traverse := d.attrPath[:] - final := d.attrPath[len(d.attrPath)-1] - - // Index should never be the first step - // as indexing of top blocks (such as resources & data sources) - // is handled elsewhere - if _, isIdxStep := traverse[0].(cty.IndexStep); isIdxStep { - subject := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &subject - return &ret - } - - // Process index separately - idxStep, hasIdx := final.(cty.IndexStep) - if hasIdx { - final = d.attrPath[len(d.attrPath)-2] - traverse = d.attrPath[:len(d.attrPath)-1] - } - - // If we have more than one step after removing index - // then we'll first try to traverse to a child body - // corresponding to the requested path. - if len(traverse) > 1 { - body = traversePathSteps(traverse, body) - } - - // Default is to indicate a missing item in the deepest body we reached - // while traversing. 
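A minimal sketch of the intended round trip, assuming an hclsyntax-parsed body and using cty.GetAttrPath as a shorthand for building the path:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/hashicorp/terraform/tfdiags"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte("count = 0\n")
	f, _ := hclsyntax.ParseConfig(src, "main.tf", hcl.Pos{Line: 1, Column: 1})

	var diags tfdiags.Diagnostics
	diags = diags.Append(tfdiags.AttributeValue(
		tfdiags.Error,
		"Invalid instance count",
		`The "count" attribute must be at least 1.`,
		cty.GetAttrPath("count"),
	))

	// Before elaboration there is no source location.
	fmt.Println(diags[0].Source().Subject) // <nil>

	// InConfigBody resolves the attribute path against the body; the
	// subject becomes the range of the "count" expression, or the body's
	// missing-item range if the attribute were absent.
	diags = diags.InConfigBody(f.Body)
	fmt.Println(diags[0].Source().Subject.StartString()) // e.g. main.tf:1,9
}
```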
- subject := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &subject - - // Once we get here, "final" should be a GetAttr step that maps to an - // attribute in our current body. - finalStep, isAttr := final.(cty.GetAttrStep) - if !isAttr { - return &ret - } - - content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: finalStep.Name, - Required: true, - }, - }, - }) - if contentDiags.HasErrors() { - return &ret - } - - if attr, ok := content.Attributes[finalStep.Name]; ok { - hclRange := attr.Expr.Range() - if hasIdx { - // Try to be more precise by finding index range - hclRange = hclRangeFromIndexStepAndAttribute(idxStep, attr) - } - subject = SourceRangeFromHCL(hclRange) - ret.subject = &subject - } - - return &ret -} - -func traversePathSteps(traverse []cty.PathStep, body hcl.Body) hcl.Body { - for i := 0; i < len(traverse); i++ { - step := traverse[i] - - switch tStep := step.(type) { - case cty.GetAttrStep: - - var next cty.PathStep - if i < (len(traverse) - 1) { - next = traverse[i+1] - } - - // Will be indexing into our result here? - var indexType cty.Type - var indexVal cty.Value - if nextIndex, ok := next.(cty.IndexStep); ok { - indexVal = nextIndex.Key - indexType = indexVal.Type() - i++ // skip over the index on subsequent iterations - } - - var blockLabelNames []string - if indexType == cty.String { - // Map traversal means we expect one label for the key. - blockLabelNames = []string{"key"} - } - - // For intermediate steps we expect to be referring to a child - // block, so we'll attempt decoding under that assumption. - content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: tStep.Name, - LabelNames: blockLabelNames, - }, - }, - }) - if contentDiags.HasErrors() { - return body - } - filtered := make([]*hcl.Block, 0, len(content.Blocks)) - for _, block := range content.Blocks { - if block.Type == tStep.Name { - filtered = append(filtered, block) - } - } - if len(filtered) == 0 { - // Step doesn't refer to a block - continue - } - - switch indexType { - case cty.NilType: // no index at all - if len(filtered) != 1 { - return body - } - body = filtered[0].Body - case cty.Number: - var idx int - err := gocty.FromCtyValue(indexVal, &idx) - if err != nil || idx >= len(filtered) { - return body - } - body = filtered[idx].Body - case cty.String: - key := indexVal.AsString() - var block *hcl.Block - for _, candidate := range filtered { - if candidate.Labels[0] == key { - block = candidate - break - } - } - if block == nil { - // No block with this key, so we'll just indicate a - // missing item in the containing block. - return body - } - body = block.Body - default: - // Should never happen, because only string and numeric indices - // are supported by cty collections. - return body - } - - default: - // For any other kind of step, we'll just return our current body - // as the subject and accept that this is a little inaccurate. 
- return body - } - } - return body -} - -func hclRangeFromIndexStepAndAttribute(idxStep cty.IndexStep, attr *hcl.Attribute) hcl.Range { - switch idxStep.Key.Type() { - case cty.Number: - var idx int - err := gocty.FromCtyValue(idxStep.Key, &idx) - items, diags := hcl.ExprList(attr.Expr) - if diags.HasErrors() { - return attr.Expr.Range() - } - if err != nil || idx >= len(items) { - return attr.NameRange - } - return items[idx].Range() - case cty.String: - pairs, diags := hcl.ExprMap(attr.Expr) - if diags.HasErrors() { - return attr.Expr.Range() - } - stepKey := idxStep.Key.AsString() - for _, kvPair := range pairs { - key, diags := kvPair.Key.Value(nil) - if diags.HasErrors() { - return attr.Expr.Range() - } - if key.AsString() == stepKey { - startRng := kvPair.Value.StartRange() - return startRng - } - } - return attr.NameRange - } - return attr.Expr.Range() -} - -func (d *attributeDiagnostic) Source() Source { - return Source{ - Subject: d.subject, - } -} - -// WholeContainingBody returns a diagnostic about the body that is an implied -// current configuration context. This should be returned only from -// functions whose interface specifies a clear configuration context that this -// will be resolved in. -// -// The returned attribute will not have source location information until -// context is applied to the containing diagnostics using diags.InConfigBody. -// After context is applied, the source location is currently the missing item -// range of the body. In future, this may change to some other suitable -// part of the containing body. -func WholeContainingBody(severity Severity, summary, detail string) Diagnostic { - return &wholeBodyDiagnostic{ - diagnosticBase: diagnosticBase{ - severity: severity, - summary: summary, - detail: detail, - }, - } -} - -type wholeBodyDiagnostic struct { - diagnosticBase - subject *SourceRange // populated only after ElaborateFromConfigBody -} - -func (d *wholeBodyDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic { - if d.subject != nil { - // Don't modify an already-elaborated diagnostic. - return d - } - - ret := *d - rng := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &rng - return &ret -} - -func (d *wholeBodyDiagnostic) Source() Source { - return Source{ - Subject: d.subject, - } -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go deleted file mode 100644 index a7699cf0..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go +++ /dev/null @@ -1,40 +0,0 @@ -package tfdiags - -import ( - "github.com/hashicorp/hcl/v2" -) - -type Diagnostic interface { - Severity() Severity - Description() Description - Source() Source - - // FromExpr returns the expression-related context for the diagnostic, if - // available. Returns nil if the diagnostic is not related to an - // expression evaluation. 
-	FromExpr() *FromExpr
-}
-
-type Severity rune
-
-//go:generate go run golang.org/x/tools/cmd/stringer -type=Severity
-
-const (
-	Error   Severity = 'E'
-	Warning Severity = 'W'
-)
-
-type Description struct {
-	Summary string
-	Detail  string
-}
-
-type Source struct {
-	Subject *SourceRange
-	Context *SourceRange
-}
-
-type FromExpr struct {
-	Expression  hcl.Expression
-	EvalContext *hcl.EvalContext
-}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go
deleted file mode 100644
index 50bf9d8e..00000000
--- a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package tfdiags
-
-// diagnosticBase can be embedded in other diagnostic structs to get
-// default implementations of Severity and Description. This type also
-// has default implementations of Source and FromExpr that return no source
-// location or expression-related information, so embedders should generally
-// override those methods to return more useful results where possible.
-type diagnosticBase struct {
-	severity Severity
-	summary  string
-	detail   string
-}
-
-func (d diagnosticBase) Severity() Severity {
-	return d.severity
-}
-
-func (d diagnosticBase) Description() Description {
-	return Description{
-		Summary: d.summary,
-		Detail:  d.detail,
-	}
-}
-
-func (d diagnosticBase) Source() Source {
-	return Source{}
-}
-
-func (d diagnosticBase) FromExpr() *FromExpr {
-	return nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
deleted file mode 100644
index 30476ee2..00000000
--- a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
+++ /dev/null
@@ -1,330 +0,0 @@
-package tfdiags
-
-import (
-	"bytes"
-	"fmt"
-	"path/filepath"
-	"sort"
-	"strings"
-
-	"github.com/hashicorp/errwrap"
-	multierror "github.com/hashicorp/go-multierror"
-	"github.com/hashicorp/hcl/v2"
-)
-
-// Diagnostics is a list of diagnostics. Diagnostics is intended to be used
-// where a Go "error" might normally be used, allowing richer information
-// to be conveyed (more context, support for warnings).
-//
-// A nil Diagnostics is a valid, empty diagnostics list, thus allowing
-// heap allocation to be avoided in the common case where there are no
-// diagnostics to report at all.
-type Diagnostics []Diagnostic
-
-// Append is the main interface for constructing Diagnostics lists, taking
-// an existing list (which may be nil) and appending the new objects to it
-// after normalizing them to be implementations of Diagnostic.
-//
-// The usual pattern for a function that natively "speaks" diagnostics is:
-//
-//     // Create a nil Diagnostics at the start of the function
-//     var diags tfdiags.Diagnostics
-//
-//     // At later points, build on it if errors / warnings occur:
-//     foo, err := DoSomethingRisky()
-//     if err != nil {
-//         diags = diags.Append(err)
-//     }
-//
-//     // Eventually return the result and diagnostics in place of error
-//     return result, diags
-//
-// Append accepts a variety of different diagnostic-like types, including
-// native Go errors and HCL diagnostics. It also knows how to unwrap
-// a multierror.Error into separate error diagnostics. It can be passed
-// another Diagnostics to concatenate the two lists. If given something
-// it cannot handle, this function will panic.
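A short sketch of that flexibility in use, ahead of the implementation below (SimpleWarning is defined later in this package):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/terraform/tfdiags"
)

func main() {
	var diags tfdiags.Diagnostics

	diags = diags.Append(errors.New("disk full"))             // native Go error
	diags = diags.Append(tfdiags.SimpleWarning("deprecated")) // an existing Diagnostic
	diags = diags.Append(hcl.Diagnostics{                     // HCL diagnostics, flattened
		&hcl.Diagnostic{Severity: hcl.DiagError, Summary: "Unsupported argument"},
	})

	var more tfdiags.Diagnostics // nil Diagnostics are accepted and add nothing
	diags = diags.Append(more)

	fmt.Println(len(diags), diags.HasErrors()) // 3 true
}
```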
-func (diags Diagnostics) Append(new ...interface{}) Diagnostics {
-	for _, item := range new {
-		if item == nil {
-			continue
-		}
-
-		switch ti := item.(type) {
-		case Diagnostic:
-			diags = append(diags, ti)
-		case Diagnostics:
-			diags = append(diags, ti...) // flatten
-		case diagnosticsAsError:
-			diags = diags.Append(ti.Diagnostics) // unwrap
-		case NonFatalError:
-			diags = diags.Append(ti.Diagnostics) // unwrap
-		case hcl.Diagnostics:
-			for _, hclDiag := range ti {
-				diags = append(diags, hclDiagnostic{hclDiag})
-			}
-		case *hcl.Diagnostic:
-			diags = append(diags, hclDiagnostic{ti})
-		case *multierror.Error:
-			for _, err := range ti.Errors {
-				diags = append(diags, nativeError{err})
-			}
-		case error:
-			switch {
-			case errwrap.ContainsType(ti, Diagnostics(nil)):
-				// If we have an errwrap wrapper with a Diagnostics hiding
-				// inside then we'll unpick it here to get access to the
-				// individual diagnostics.
-				diags = diags.Append(errwrap.GetType(ti, Diagnostics(nil)))
-			case errwrap.ContainsType(ti, hcl.Diagnostics(nil)):
-				// Likewise, if we have HCL diagnostics we'll unpick that too.
-				diags = diags.Append(errwrap.GetType(ti, hcl.Diagnostics(nil)))
-			default:
-				diags = append(diags, nativeError{ti})
-			}
-		default:
-			panic(fmt.Errorf("can't construct diagnostic(s) from %T", item))
-		}
-	}
-
-	// Given the above, we should never end up with a non-nil empty slice
-	// here, but we'll make sure of that so callers can rely on empty == nil
-	if len(diags) == 0 {
-		return nil
-	}
-
-	return diags
-}
-
-// HasErrors returns true if any of the diagnostics in the list have
-// a severity of Error.
-func (diags Diagnostics) HasErrors() bool {
-	for _, diag := range diags {
-		if diag.Severity() == Error {
-			return true
-		}
-	}
-	return false
-}
-
-// ForRPC returns a version of the receiver that has been simplified so that
-// it is friendly to RPC protocols.
-//
-// Currently this means that it can be serialized with encoding/gob and
-// subsequently re-inflated. It may later grow to include other serialization
-// formats.
-//
-// Note that this loses information about the original objects used to
-// construct the diagnostics, so e.g. the errwrap API will not work as
-// expected on an error-wrapped Diagnostics that came from ForRPC.
-func (diags Diagnostics) ForRPC() Diagnostics {
-	ret := make(Diagnostics, len(diags))
-	for i := range diags {
-		ret[i] = makeRPCFriendlyDiag(diags[i])
-	}
-	return ret
-}
-
-// Err flattens a diagnostics list into a single Go error, or to nil
-// if the diagnostics list does not include any error-level diagnostics.
-//
-// This can be used to smuggle diagnostics through an API that deals in
-// native errors, but unfortunately it will lose naked warnings (warnings
-// that aren't accompanied by at least one error) since such APIs have no
-// mechanism through which to report these.
-//
-//     return result, diags.Err()
-func (diags Diagnostics) Err() error {
-	if !diags.HasErrors() {
-		return nil
-	}
-	return diagnosticsAsError{diags}
-}
-
-// ErrWithWarnings is similar to Err except that it will also return a non-nil
-// error if the receiver contains only warnings.
-//
-// In the warnings-only situation, the result is guaranteed to be of dynamic
-// type NonFatalError, allowing diagnostics-aware callers to type-assert
-// and unwrap it, treating it as non-fatal.
-//
-// This should be used only in contexts where the caller is able to recognize
-// and handle NonFatalError.
For normal callers that expect a lack of errors -// to be signaled by nil, use just Diagnostics.Err. -func (diags Diagnostics) ErrWithWarnings() error { - if len(diags) == 0 { - return nil - } - if diags.HasErrors() { - return diags.Err() - } - return NonFatalError{diags} -} - -// NonFatalErr is similar to Err except that it always returns either nil -// (if there are no diagnostics at all) or NonFatalError. -// -// This allows diagnostics to be returned over an error return channel while -// being explicit that the diagnostics should not halt processing. -// -// This should be used only in contexts where the caller is able to recognize -// and handle NonFatalError. For normal callers that expect a lack of errors -// to be signaled by nil, use just Diagnostics.Err. -func (diags Diagnostics) NonFatalErr() error { - if len(diags) == 0 { - return nil - } - return NonFatalError{diags} -} - -// Sort applies an ordering to the diagnostics in the receiver in-place. -// -// The ordering is: warnings before errors, sourceless before sourced, -// short source paths before long source paths, and then ordering by -// position within each file. -// -// Diagnostics that do not differ by any of these sortable characteristics -// will remain in the same relative order after this method returns. -func (diags Diagnostics) Sort() { - sort.Stable(sortDiagnostics(diags)) -} - -type diagnosticsAsError struct { - Diagnostics -} - -func (dae diagnosticsAsError) Error() string { - diags := dae.Diagnostics - switch { - case len(diags) == 0: - // should never happen, since we don't create this wrapper if - // there are no diagnostics in the list. - return "no errors" - case len(diags) == 1: - desc := diags[0].Description() - if desc.Detail == "" { - return desc.Summary - } - return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) - default: - var ret bytes.Buffer - fmt.Fprintf(&ret, "%d problems:\n", len(diags)) - for _, diag := range dae.Diagnostics { - desc := diag.Description() - if desc.Detail == "" { - fmt.Fprintf(&ret, "\n- %s", desc.Summary) - } else { - fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) - } - } - return ret.String() - } -} - -// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped -// diagnostics object can be picked apart by errwrap-aware code. -func (dae diagnosticsAsError) WrappedErrors() []error { - var errs []error - for _, diag := range dae.Diagnostics { - if wrapper, isErr := diag.(nativeError); isErr { - errs = append(errs, wrapper.err) - } - } - return errs -} - -// NonFatalError is a special error type, returned by -// Diagnostics.ErrWithWarnings and Diagnostics.NonFatalErr, -// that indicates that the wrapped diagnostics should be treated as non-fatal. -// Callers can conditionally type-assert an error to this type in order to -// detect the non-fatal scenario and handle it in a different way. -type NonFatalError struct { - Diagnostics -} - -func (woe NonFatalError) Error() string { - diags := woe.Diagnostics - switch { - case len(diags) == 0: - // should never happen, since we don't create this wrapper if - // there are no diagnostics in the list. 
- return "no errors or warnings" - case len(diags) == 1: - desc := diags[0].Description() - if desc.Detail == "" { - return desc.Summary - } - return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) - default: - var ret bytes.Buffer - if diags.HasErrors() { - fmt.Fprintf(&ret, "%d problems:\n", len(diags)) - } else { - fmt.Fprintf(&ret, "%d warnings:\n", len(diags)) - } - for _, diag := range woe.Diagnostics { - desc := diag.Description() - if desc.Detail == "" { - fmt.Fprintf(&ret, "\n- %s", desc.Summary) - } else { - fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) - } - } - return ret.String() - } -} - -// sortDiagnostics is an implementation of sort.Interface -type sortDiagnostics []Diagnostic - -var _ sort.Interface = sortDiagnostics(nil) - -func (sd sortDiagnostics) Len() int { - return len(sd) -} - -func (sd sortDiagnostics) Less(i, j int) bool { - iD, jD := sd[i], sd[j] - iSev, jSev := iD.Severity(), jD.Severity() - iSrc, jSrc := iD.Source(), jD.Source() - - switch { - - case iSev != jSev: - return iSev == Warning - - case (iSrc.Subject == nil) != (jSrc.Subject == nil): - return iSrc.Subject == nil - - case iSrc.Subject != nil && *iSrc.Subject != *jSrc.Subject: - iSubj := iSrc.Subject - jSubj := jSrc.Subject - switch { - case iSubj.Filename != jSubj.Filename: - // Path with fewer segments goes first if they are different lengths - sep := string(filepath.Separator) - iCount := strings.Count(iSubj.Filename, sep) - jCount := strings.Count(jSubj.Filename, sep) - if iCount != jCount { - return iCount < jCount - } - return iSubj.Filename < jSubj.Filename - case iSubj.Start.Byte != jSubj.Start.Byte: - return iSubj.Start.Byte < jSubj.Start.Byte - case iSubj.End.Byte != jSubj.End.Byte: - return iSubj.End.Byte < jSubj.End.Byte - } - fallthrough - - default: - // The remaining properties do not have a defined ordering, so - // we'll leave it unspecified. Since we use sort.Stable in - // the caller of this, the ordering of remaining items will - // be preserved. - return false - } -} - -func (sd sortDiagnostics) Swap(i, j int) { - sd[i], sd[j] = sd[j], sd[i] -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/doc.go b/vendor/github.com/hashicorp/terraform/tfdiags/doc.go deleted file mode 100644 index c427879e..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Package tfdiags is a utility package for representing errors and -// warnings in a manner that allows us to produce good messages for the -// user. -// -// "diag" is short for "diagnostics", and is meant as a general word for -// feedback to a user about potential or actual problems. -// -// A design goal for this package is for it to be able to provide rich -// messaging where possible but to also be pragmatic about dealing with -// generic errors produced by system components that _can't_ provide -// such rich messaging. As a consequence, the main types in this package -- -// Diagnostics and Diagnostic -- are designed so that they can be "smuggled" -// over an error channel and then be unpacked at the other end, so that -// error diagnostics (at least) can transit through APIs that are not -// aware of this package. 
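That smuggling pattern, sketched with a hypothetical apply function that reports through ErrWithWarnings:

```go
package main

import (
	"log"

	"github.com/hashicorp/terraform/tfdiags"
)

// apply is a hypothetical operation that reports via diagnostics.
func apply() error {
	var diags tfdiags.Diagnostics
	diags = diags.Append(tfdiags.SimpleWarning("resource type is deprecated"))
	return diags.ErrWithWarnings()
}

func main() {
	if err := apply(); err != nil {
		if nf, ok := err.(tfdiags.NonFatalError); ok {
			// Warnings only: report them and keep going.
			for _, diag := range nf.Diagnostics {
				log.Printf("[WARN] %s", diag.Description().Summary)
			}
		} else {
			log.Fatal(err) // at least one real error
		}
	}
}
```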
-package tfdiags diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/error.go b/vendor/github.com/hashicorp/terraform/tfdiags/error.go deleted file mode 100644 index 13f7a714..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/error.go +++ /dev/null @@ -1,28 +0,0 @@ -package tfdiags - -// nativeError is a Diagnostic implementation that wraps a normal Go error -type nativeError struct { - err error -} - -var _ Diagnostic = nativeError{} - -func (e nativeError) Severity() Severity { - return Error -} - -func (e nativeError) Description() Description { - return Description{ - Summary: FormatError(e.err), - } -} - -func (e nativeError) Source() Source { - // No source information available for a native error - return Source{} -} - -func (e nativeError) FromExpr() *FromExpr { - // Native errors are not expression-related - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go deleted file mode 100644 index 37fb0d1a..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go +++ /dev/null @@ -1,141 +0,0 @@ -package tfdiags - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" -) - -// hclDiagnostic is a Diagnostic implementation that wraps a HCL Diagnostic -type hclDiagnostic struct { - diag *hcl.Diagnostic -} - -var _ Diagnostic = hclDiagnostic{} - -func (d hclDiagnostic) Severity() Severity { - switch d.diag.Severity { - case hcl.DiagWarning: - return Warning - default: - return Error - } -} - -func (d hclDiagnostic) Description() Description { - return Description{ - Summary: d.diag.Summary, - Detail: d.diag.Detail, - } -} - -func (d hclDiagnostic) Source() Source { - var ret Source - if d.diag.Subject != nil { - rng := SourceRangeFromHCL(*d.diag.Subject) - ret.Subject = &rng - } - if d.diag.Context != nil { - rng := SourceRangeFromHCL(*d.diag.Context) - ret.Context = &rng - } - return ret -} - -func (d hclDiagnostic) FromExpr() *FromExpr { - if d.diag.Expression == nil || d.diag.EvalContext == nil { - return nil - } - return &FromExpr{ - Expression: d.diag.Expression, - EvalContext: d.diag.EvalContext, - } -} - -// SourceRangeFromHCL constructs a SourceRange from the corresponding range -// type within the HCL package. -func SourceRangeFromHCL(hclRange hcl.Range) SourceRange { - return SourceRange{ - Filename: hclRange.Filename, - Start: SourcePos{ - Line: hclRange.Start.Line, - Column: hclRange.Start.Column, - Byte: hclRange.Start.Byte, - }, - End: SourcePos{ - Line: hclRange.End.Line, - Column: hclRange.End.Column, - Byte: hclRange.End.Byte, - }, - } -} - -// ToHCL constructs a HCL Range from the receiving SourceRange. This is the -// opposite of SourceRangeFromHCL. -func (r SourceRange) ToHCL() hcl.Range { - return hcl.Range{ - Filename: r.Filename, - Start: hcl.Pos{ - Line: r.Start.Line, - Column: r.Start.Column, - Byte: r.Start.Byte, - }, - End: hcl.Pos{ - Line: r.End.Line, - Column: r.End.Column, - Byte: r.End.Byte, - }, - } -} - -// ToHCL constructs a hcl.Diagnostics containing the same diagnostic messages -// as the receiving tfdiags.Diagnostics. -// -// This conversion preserves the data that HCL diagnostics are able to -// preserve but would be lossy in a round trip from tfdiags to HCL and then -// back to tfdiags, because it will lose the specific type information of -// the source diagnostics. 
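In contrast to that lossy diagnostics conversion, the range conversions above copy every field and are exact inverses; a quick sketch:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/terraform/tfdiags"
)

func main() {
	in := hcl.Range{
		Filename: "main.tf",
		Start:    hcl.Pos{Line: 3, Column: 1, Byte: 20},
		End:      hcl.Pos{Line: 3, Column: 10, Byte: 29},
	}
	out := tfdiags.SourceRangeFromHCL(in).ToHCL()
	fmt.Println(out == in) // true: the range round trip is lossless
}
```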
In most cases this will not be a significant
-// problem, but could produce an awkward result in some special cases such
-// as converting the result of ConsolidateWarnings, which will force the
-// resulting warning groups to be flattened early.
-func (d Diagnostics) ToHCL() hcl.Diagnostics {
-	if len(d) == 0 {
-		return nil
-	}
-	ret := make(hcl.Diagnostics, len(d))
-	for i, diag := range d {
-		severity := diag.Severity()
-		desc := diag.Description()
-		source := diag.Source()
-		fromExpr := diag.FromExpr()
-
-		hclDiag := &hcl.Diagnostic{
-			Summary: desc.Summary,
-			Detail:  desc.Detail,
-		}
-
-		switch severity {
-		case Warning:
-			hclDiag.Severity = hcl.DiagWarning
-		case Error:
-			hclDiag.Severity = hcl.DiagError
-		default:
-			// The above should always be exhaustive for all of the valid
-			// Severity values in this package.
-			panic(fmt.Sprintf("unknown diagnostic severity %s", severity))
-		}
-		if source.Subject != nil {
-			hclDiag.Subject = source.Subject.ToHCL().Ptr()
-		}
-		if source.Context != nil {
-			hclDiag.Context = source.Context.ToHCL().Ptr()
-		}
-		if fromExpr != nil {
-			hclDiag.Expression = fromExpr.Expression
-			hclDiag.EvalContext = fromExpr.EvalContext
-		}
-
-		ret[i] = hclDiag
-	}
-	return ret
-}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go
deleted file mode 100644
index 485063b0..00000000
--- a/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package tfdiags
-
-import (
-	"encoding/gob"
-)
-
-type rpcFriendlyDiag struct {
-	Severity_ Severity
-	Summary_  string
-	Detail_   string
-	Subject_  *SourceRange
-	Context_  *SourceRange
-}
-
-// makeRPCFriendlyDiag transforms a given diagnostic so that it is more
-// friendly to RPC.
-//
-// In particular, it currently returns an object that can be serialized and
-// later re-inflated using gob. This definition may grow to include other
-// serializations later.
-func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic {
-	desc := diag.Description()
-	source := diag.Source()
-	return &rpcFriendlyDiag{
-		Severity_: diag.Severity(),
-		Summary_:  desc.Summary,
-		Detail_:   desc.Detail,
-		Subject_:  source.Subject,
-		Context_:  source.Context,
-	}
-}
-
-func (d *rpcFriendlyDiag) Severity() Severity {
-	return d.Severity_
-}
-
-func (d *rpcFriendlyDiag) Description() Description {
-	return Description{
-		Summary: d.Summary_,
-		Detail:  d.Detail_,
-	}
-}
-
-func (d *rpcFriendlyDiag) Source() Source {
-	return Source{
-		Subject: d.Subject_,
-		Context: d.Context_,
-	}
-}
-
-func (d rpcFriendlyDiag) FromExpr() *FromExpr {
-	// RPC-friendly diagnostics cannot preserve expression information because
-	// expressions themselves are not RPC-friendly.
-	return nil
-}
-
-func init() {
-	gob.Register((*rpcFriendlyDiag)(nil))
-}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go
deleted file mode 100644
index 78a72106..00000000
--- a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Code generated by "stringer -type=Severity"; DO NOT EDIT.
-
-package tfdiags
-
-import "strconv"
-
-func _() {
-	// An "invalid array index" compiler error signifies that the constant values have changed.
-	// Re-run the stringer command to generate them again.
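The indices checked just below encode the rune values chosen for Severity in diagnostic.go: 'E' is code point 69 and 'W' is 87. A small sketch of the resulting behavior:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/tfdiags"
)

func main() {
	fmt.Println(tfdiags.Error.String())         // "Error"   ('E' == 69)
	fmt.Println(tfdiags.Warning.String())       // "Warning" ('W' == 87)
	fmt.Println(tfdiags.Severity('?').String()) // "Severity(63)" for unknown values
}
```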
- var x [1]struct{} - _ = x[Error-69] - _ = x[Warning-87] -} - -const ( - _Severity_name_0 = "Error" - _Severity_name_1 = "Warning" -) - -func (i Severity) String() string { - switch { - case i == 69: - return _Severity_name_0 - case i == 87: - return _Severity_name_1 - default: - return "Severity(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go deleted file mode 100644 index b0f1ecd4..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go +++ /dev/null @@ -1,30 +0,0 @@ -package tfdiags - -type simpleWarning string - -var _ Diagnostic = simpleWarning("") - -// SimpleWarning constructs a simple (summary-only) warning diagnostic. -func SimpleWarning(msg string) Diagnostic { - return simpleWarning(msg) -} - -func (e simpleWarning) Severity() Severity { - return Warning -} - -func (e simpleWarning) Description() Description { - return Description{ - Summary: string(e), - } -} - -func (e simpleWarning) Source() Source { - // No source information available for a simple warning - return Source{} -} - -func (e simpleWarning) FromExpr() *FromExpr { - // Simple warnings are not expression-related - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go b/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go deleted file mode 100644 index 3031168d..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go +++ /dev/null @@ -1,35 +0,0 @@ -package tfdiags - -import ( - "fmt" - "os" - "path/filepath" -) - -type SourceRange struct { - Filename string - Start, End SourcePos -} - -type SourcePos struct { - Line, Column, Byte int -} - -// StartString returns a string representation of the start of the range, -// including the filename and the line and column numbers. -func (r SourceRange) StartString() string { - filename := r.Filename - - // We'll try to relative-ize our filename here so it's less verbose - // in the common case of being in the current working directory. If not, - // we'll just show the full path. - wd, err := os.Getwd() - if err == nil { - relFn, err := filepath.Rel(wd, filename) - if err == nil { - filename = relFn - } - } - - return fmt.Sprintf("%s:%d,%d", filename, r.Start.Line, r.Start.Column) -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go b/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go deleted file mode 100644 index eaa27373..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go +++ /dev/null @@ -1,13 +0,0 @@ -package tfdiags - -// Sourceless creates and returns a diagnostic with no source location -// information. This is generally used for operational-type errors that are -// caused by or relate to the environment where Terraform is running rather -// than to the provided configuration. -func Sourceless(severity Severity, summary, detail string) Diagnostic { - return diagnosticBase{ - severity: severity, - summary: summary, - detail: detail, - } -} diff --git a/vendor/github.com/hashicorp/terraform/version/version.go b/vendor/github.com/hashicorp/terraform/version/version.go deleted file mode 100644 index 13868931..00000000 --- a/vendor/github.com/hashicorp/terraform/version/version.go +++ /dev/null @@ -1,40 +0,0 @@ -// The version package provides a location to set the release versions for all -// packages to consume, without creating import cycles. 
-// -// This package should not import any other terraform packages. -package version - -import ( - "fmt" - - version "github.com/hashicorp/go-version" -) - -// The main version number that is being run at the moment. -var Version = "0.13.0" - -// A pre-release marker for the version. If this is "" (empty string) -// then it means that it is a final release. Otherwise, this is a pre-release -// such as "dev" (in development), "beta", "rc1", etc. -var Prerelease = "beta1" - -// SemVer is an instance of version.Version. This has the secondary -// benefit of verifying during tests and init time that our version is a -// proper semantic version, which should always be the case. -var SemVer *version.Version - -func init() { - SemVer = version.Must(version.NewVersion(Version)) -} - -// Header is the header name used to send the current terraform version -// in http requests. -const Header = "Terraform-Version" - -// String returns the complete version string, including prerelease -func String() string { - if Prerelease != "" { - return fmt.Sprintf("%s-%s", Version, Prerelease) - } - return Version -} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE deleted file mode 100644 index f9c841a5..00000000 --- a/vendor/github.com/mitchellh/go-homedir/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md deleted file mode 100644 index d70706d5..00000000 --- a/vendor/github.com/mitchellh/go-homedir/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# go-homedir - -This is a Go library for detecting the user's home directory without -the use of cgo, so the library can be used in cross-compilation environments. - -Usage is incredibly simple, just call `homedir.Dir()` to get the home directory -for a user, and `homedir.Expand()` to expand the `~` in a path to the home -directory. - -**Why not just use `os/user`?** The built-in `os/user` package requires -cgo on Darwin systems. This means that any Go code that uses that package -cannot cross compile. But 99% of the time the use for `os/user` is just to -retrieve the home directory, which we can do for the current user without -cgo. This library does that, enabling cross-compilation. 
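A minimal usage sketch of the two entry points described above (the paths in the comments are hypothetical):

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/go-homedir"
)

func main() {
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(home) // e.g. /home/alice

	// Expand replaces a leading "~" with the home directory.
	cfg, err := homedir.Expand("~/.terraformrc")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg) // e.g. /home/alice/.terraformrc
}
```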
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go deleted file mode 100644 index 25378537..00000000 --- a/vendor/github.com/mitchellh/go-homedir/homedir.go +++ /dev/null @@ -1,167 +0,0 @@ -package homedir - -import ( - "bytes" - "errors" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" -) - -// DisableCache will disable caching of the home directory. Caching is enabled -// by default. -var DisableCache bool - -var homedirCache string -var cacheLock sync.RWMutex - -// Dir returns the home directory for the executing user. -// -// This uses an OS-specific method for discovering the home directory. -// An error is returned if a home directory cannot be detected. -func Dir() (string, error) { - if !DisableCache { - cacheLock.RLock() - cached := homedirCache - cacheLock.RUnlock() - if cached != "" { - return cached, nil - } - } - - cacheLock.Lock() - defer cacheLock.Unlock() - - var result string - var err error - if runtime.GOOS == "windows" { - result, err = dirWindows() - } else { - // Unix-like system, so just assume Unix - result, err = dirUnix() - } - - if err != nil { - return "", err - } - homedirCache = result - return result, nil -} - -// Expand expands the path to include the home directory if the path -// is prefixed with `~`. If it isn't prefixed with `~`, the path is -// returned as-is. -func Expand(path string) (string, error) { - if len(path) == 0 { - return path, nil - } - - if path[0] != '~' { - return path, nil - } - - if len(path) > 1 && path[1] != '/' && path[1] != '\\' { - return "", errors.New("cannot expand user-specific home dir") - } - - dir, err := Dir() - if err != nil { - return "", err - } - - return filepath.Join(dir, path[1:]), nil -} - -// Reset clears the cache, forcing the next call to Dir to re-detect -// the home directory. This generally never has to be called, but can be -// useful in tests if you're modifying the home directory via the HOME -// env var or something. -func Reset() { - cacheLock.Lock() - defer cacheLock.Unlock() - homedirCache = "" -} - -func dirUnix() (string, error) { - homeEnv := "HOME" - if runtime.GOOS == "plan9" { - // On plan9, env vars are lowercase. - homeEnv = "home" - } - - // First prefer the HOME environmental variable - if home := os.Getenv(homeEnv); home != "" { - return home, nil - } - - var stdout bytes.Buffer - - // If that fails, try OS specific commands - if runtime.GOOS == "darwin" { - cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) - cmd.Stdout = &stdout - if err := cmd.Run(); err == nil { - result := strings.TrimSpace(stdout.String()) - if result != "" { - return result, nil - } - } - } else { - cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - // If the error is ErrNotFound, we ignore it. Otherwise, return it. 
- if err != exec.ErrNotFound { - return "", err - } - } else { - if passwd := strings.TrimSpace(stdout.String()); passwd != "" { - // username:password:uid:gid:gecos:home:shell - passwdParts := strings.SplitN(passwd, ":", 7) - if len(passwdParts) > 5 { - return passwdParts[5], nil - } - } - } - } - - // If all else fails, try the shell - stdout.Reset() - cmd := exec.Command("sh", "-c", "cd && pwd") - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - return "", err - } - - result := strings.TrimSpace(stdout.String()) - if result == "" { - return "", errors.New("blank output when reading home directory") - } - - return result, nil -} - -func dirWindows() (string, error) { - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); home != "" { - return home, nil - } - - // Prefer standard environment variable USERPROFILE - if home := os.Getenv("USERPROFILE"); home != "" { - return home, nil - } - - drive := os.Getenv("HOMEDRIVE") - path := os.Getenv("HOMEPATH") - home := drive + path - if drive == "" || path == "" { - return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") - } - - return home, nil -} diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE deleted file mode 100644 index a3866a29..00000000 --- a/vendor/github.com/mitchellh/hashstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md deleted file mode 100644 index 28ce45a3..00000000 --- a/vendor/github.com/mitchellh/hashstructure/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure) - -hashstructure is a Go library for creating a unique hash value -for arbitrary values in Go. - -This can be used to key values in a hash (for use in a map, set, etc.) -that are complex. The most common use case is comparing two values without -sending data across the network, caching values locally (de-dup), and so on. - -## Features - - * Hash any arbitrary Go value, including complex types. - - * Tag a struct field to ignore it and not affect the hash value. 
-
- * Tag a slice type struct field to treat it as a set where ordering
-   doesn't affect the hash code but the field itself is still taken into
-   account to create the hash value.
-
- * Optionally specify a custom hash function to optimize for speed, collision
-   avoidance for your data set, etc.
-
- * Optionally hash the output of `.String()` on structs that implement fmt.Stringer,
-   allowing effective hashing of time.Time
-
-## Installation
-
-Standard `go get`:
-
-```
-$ go get github.com/mitchellh/hashstructure
-```
-
-## Usage & Example
-
-For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure).
-
-A quick code example is shown below:
-
-```go
-type ComplexStruct struct {
-	Name     string
-	Age      uint
-	Metadata map[string]interface{}
-}
-
-v := ComplexStruct{
-	Name: "mitchellh",
-	Age:  64,
-	Metadata: map[string]interface{}{
-		"car":      true,
-		"location": "California",
-		"siblings": []string{"Bob", "John"},
-	},
-}
-
-hash, err := hashstructure.Hash(v, nil)
-if err != nil {
-	panic(err)
-}
-
-fmt.Printf("%d", hash)
-// Output:
-// 2307517237273902113
-```
diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go
deleted file mode 100644
index ea13a158..00000000
--- a/vendor/github.com/mitchellh/hashstructure/hashstructure.go
+++ /dev/null
@@ -1,358 +0,0 @@
-package hashstructure
-
-import (
-	"encoding/binary"
-	"fmt"
-	"hash"
-	"hash/fnv"
-	"reflect"
-)
-
-// ErrNotStringer is returned when there's an error with hash:"string"
-type ErrNotStringer struct {
-	Field string
-}
-
-// Error implements error for ErrNotStringer
-func (ens *ErrNotStringer) Error() string {
-	return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field)
-}
-
-// HashOptions are options that are available for hashing.
-type HashOptions struct {
-	// Hasher is the hash function to use. If this isn't set, it will
-	// default to FNV.
-	Hasher hash.Hash64
-
-	// TagName is the struct tag to look at when hashing the structure.
-	// By default this is "hash".
-	TagName string
-
-	// ZeroNil is a flag determining whether a nil pointer should be treated
-	// as equal to a zero value of the pointed-to type. By default this is false.
-	ZeroNil bool
-}
-
-// Hash returns the hash value of an arbitrary value.
-//
-// If opts is nil, then default options will be used. See HashOptions
-// for the default values. The same *HashOptions value cannot be used
-// concurrently. None of the values within a *HashOptions struct are
-// safe to read/write while hashing is being done.
-//
-// Notes on the value:
-//
-//   * Unexported fields on structs are ignored and do not affect the
-//     hash value.
-//
-//   * Adding an exported field to a struct with the zero value will change
-//     the hash value.
-//
-// For structs, the hashing can be controlled using tags. For example:
-//
-//     struct {
-//         Name string
-//         UUID string `hash:"ignore"`
-//     }
-//
-// The available tag values are:
-//
-//   * "ignore" or "-" - The field will be ignored and not affect the hash code.
-//
-//   * "set" - The field will be treated as a set, where ordering doesn't
-//     affect the hash code. This only works for slices.
-// -// * "string" - The field will be hashed as a string, only works when the -// field implements fmt.Stringer -// -func Hash(v interface{}, opts *HashOptions) (uint64, error) { - // Create default options - if opts == nil { - opts = &HashOptions{} - } - if opts.Hasher == nil { - opts.Hasher = fnv.New64() - } - if opts.TagName == "" { - opts.TagName = "hash" - } - - // Reset the hash - opts.Hasher.Reset() - - // Create our walker and walk the structure - w := &walker{ - h: opts.Hasher, - tag: opts.TagName, - zeronil: opts.ZeroNil, - } - return w.visit(reflect.ValueOf(v), nil) -} - -type walker struct { - h hash.Hash64 - tag string - zeronil bool -} - -type visitOpts struct { - // Flags are a bitmask of flags to affect behavior of this visit - Flags visitFlag - - // Information about the struct containing this field - Struct interface{} - StructField string -} - -func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { - t := reflect.TypeOf(0) - - // Loop since these can be wrapped in multiple layers of pointers - // and interfaces. - for { - // If we have an interface, dereference it. We have to do this up - // here because it might be a nil in there and the check below must - // catch that. - if v.Kind() == reflect.Interface { - v = v.Elem() - continue - } - - if v.Kind() == reflect.Ptr { - if w.zeronil { - t = v.Type().Elem() - } - v = reflect.Indirect(v) - continue - } - - break - } - - // If it is nil, treat it like a zero. - if !v.IsValid() { - v = reflect.Zero(t) - } - - // Binary writing can use raw ints, we have to convert to - // a sized-int, we'll choose the largest... - switch v.Kind() { - case reflect.Int: - v = reflect.ValueOf(int64(v.Int())) - case reflect.Uint: - v = reflect.ValueOf(uint64(v.Uint())) - case reflect.Bool: - var tmp int8 - if v.Bool() { - tmp = 1 - } - v = reflect.ValueOf(tmp) - } - - k := v.Kind() - - // We can shortcut numeric values by directly binary writing them - if k >= reflect.Int && k <= reflect.Complex64 { - // A direct hash calculation - w.h.Reset() - err := binary.Write(w.h, binary.LittleEndian, v.Interface()) - return w.h.Sum64(), err - } - - switch k { - case reflect.Array: - var h uint64 - l := v.Len() - for i := 0; i < l; i++ { - current, err := w.visit(v.Index(i), nil) - if err != nil { - return 0, err - } - - h = hashUpdateOrdered(w.h, h, current) - } - - return h, nil - - case reflect.Map: - var includeMap IncludableMap - if opts != nil && opts.Struct != nil { - if v, ok := opts.Struct.(IncludableMap); ok { - includeMap = v - } - } - - // Build the hash for the map. We do this by XOR-ing all the key - // and value hashes. This makes it deterministic despite ordering. 
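The same unordered XOR combining described here also backs the `hash:"set"` tag documented above, so map fields and set-tagged slices both hash order-independently; a sketch:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

type Deployment struct {
	Name    string
	Labels  map[string]string
	Servers []string `hash:"set"`    // unordered, like map entries
	TraceID string   `hash:"ignore"` // excluded from the hash
}

func main() {
	a := Deployment{
		Name:    "web",
		Labels:  map[string]string{"env": "prod", "tier": "frontend"},
		Servers: []string{"a", "b"},
	}
	b := Deployment{
		Name:    "web",
		Labels:  map[string]string{"tier": "frontend", "env": "prod"},
		Servers: []string{"b", "a"},
		TraceID: "ignored-either-way",
	}

	ha, _ := hashstructure.Hash(a, nil)
	hb, _ := hashstructure.Hash(b, nil)
	fmt.Println(ha == hb) // true: XOR combining makes ordering irrelevant
}
```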
- var h uint64 - for _, k := range v.MapKeys() { - v := v.MapIndex(k) - if includeMap != nil { - incl, err := includeMap.HashIncludeMap( - opts.StructField, k.Interface(), v.Interface()) - if err != nil { - return 0, err - } - if !incl { - continue - } - } - - kh, err := w.visit(k, nil) - if err != nil { - return 0, err - } - vh, err := w.visit(v, nil) - if err != nil { - return 0, err - } - - fieldHash := hashUpdateOrdered(w.h, kh, vh) - h = hashUpdateUnordered(h, fieldHash) - } - - return h, nil - - case reflect.Struct: - parent := v.Interface() - var include Includable - if impl, ok := parent.(Includable); ok { - include = impl - } - - t := v.Type() - h, err := w.visit(reflect.ValueOf(t.Name()), nil) - if err != nil { - return 0, err - } - - l := v.NumField() - for i := 0; i < l; i++ { - if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { - var f visitFlag - fieldType := t.Field(i) - if fieldType.PkgPath != "" { - // Unexported - continue - } - - tag := fieldType.Tag.Get(w.tag) - if tag == "ignore" || tag == "-" { - // Ignore this field - continue - } - - // if string is set, use the string value - if tag == "string" { - if impl, ok := innerV.Interface().(fmt.Stringer); ok { - innerV = reflect.ValueOf(impl.String()) - } else { - return 0, &ErrNotStringer{ - Field: v.Type().Field(i).Name, - } - } - } - - // Check if we implement includable and check it - if include != nil { - incl, err := include.HashInclude(fieldType.Name, innerV) - if err != nil { - return 0, err - } - if !incl { - continue - } - } - - switch tag { - case "set": - f |= visitFlagSet - } - - kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil) - if err != nil { - return 0, err - } - - vh, err := w.visit(innerV, &visitOpts{ - Flags: f, - Struct: parent, - StructField: fieldType.Name, - }) - if err != nil { - return 0, err - } - - fieldHash := hashUpdateOrdered(w.h, kh, vh) - h = hashUpdateUnordered(h, fieldHash) - } - } - - return h, nil - - case reflect.Slice: - // We have two behaviors here. If it isn't a set, then we just - // visit all the elements. If it is a set, then we do a deterministic - // hash code. - var h uint64 - var set bool - if opts != nil { - set = (opts.Flags & visitFlagSet) != 0 - } - l := v.Len() - for i := 0; i < l; i++ { - current, err := w.visit(v.Index(i), nil) - if err != nil { - return 0, err - } - - if set { - h = hashUpdateUnordered(h, current) - } else { - h = hashUpdateOrdered(w.h, h, current) - } - } - - return h, nil - - case reflect.String: - // Directly hash - w.h.Reset() - _, err := w.h.Write([]byte(v.String())) - return w.h.Sum64(), err - - default: - return 0, fmt.Errorf("unknown kind to hash: %s", k) - } - -} - -func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 { - // For ordered updates, use a real hash function - h.Reset() - - // We just panic if the binary writes fail because we are writing - // an int64 which should never be fail-able. 
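To make the contrast between the two combiners concrete, a simplified restatement (not the vendored code itself):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash"
	"hash/fnv"
)

// ordered mirrors hashUpdateOrdered: feeding both values through a real
// hash function makes the argument order significant.
func ordered(h hash.Hash64, a, b uint64) uint64 {
	h.Reset()
	binary.Write(h, binary.LittleEndian, a)
	binary.Write(h, binary.LittleEndian, b)
	return h.Sum64()
}

func main() {
	h := fnv.New64()
	fmt.Println(ordered(h, 1, 2) == ordered(h, 2, 1)) // false: the digests differ
	fmt.Println(uint64(1)^2 == uint64(2)^1)           // true: XOR commutes
}
```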
- e1 := binary.Write(h, binary.LittleEndian, a) - e2 := binary.Write(h, binary.LittleEndian, b) - if e1 != nil { - panic(e1) - } - if e2 != nil { - panic(e2) - } - - return h.Sum64() -} - -func hashUpdateUnordered(a, b uint64) uint64 { - return a ^ b -} - -// visitFlag is used as a bitmask for affecting visit behavior -type visitFlag uint - -const ( - visitFlagInvalid visitFlag = iota - visitFlagSet = iota << 1 -) diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go deleted file mode 100644 index b6289c0b..00000000 --- a/vendor/github.com/mitchellh/hashstructure/include.go +++ /dev/null @@ -1,15 +0,0 @@ -package hashstructure - -// Includable is an interface that can optionally be implemented by -// a struct. It will be called for each field in the struct to check whether -// it should be included in the hash. -type Includable interface { - HashInclude(field string, v interface{}) (bool, error) -} - -// IncludableMap is an interface that can optionally be implemented by -// a struct. It will be called when a map-type field is found to ask the -// struct if the map item should be included in the hash. -type IncludableMap interface { - HashIncludeMap(field string, k, v interface{}) (bool, error) -} diff --git a/vendor/github.com/spf13/afero/.gitignore b/vendor/github.com/spf13/afero/.gitignore deleted file mode 100644 index 9c1d9861..00000000 --- a/vendor/github.com/spf13/afero/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -sftpfs/file1 -sftpfs/test/ diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt deleted file mode 100644 index 298f0e26..00000000 --- a/vendor/github.com/spf13/afero/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md deleted file mode 100644 index 3bafbfdf..00000000 --- a/vendor/github.com/spf13/afero/README.md +++ /dev/null @@ -1,442 +0,0 @@ -![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png) - -A FileSystem Abstraction System for Go - -[![Test](https://github.com/spf13/afero/actions/workflows/test.yml/badge.svg)](https://github.com/spf13/afero/actions/workflows/test.yml) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -# Overview - -Afero is a filesystem framework providing a simple, uniform and universal API -interacting with any filesystem, as an abstraction layer providing interfaces, -types and methods. Afero has an exceptionally clean interface and simple design -without needless constructors or initialization methods. - -Afero is also a library providing a base set of interoperable backend -filesystems that make it easy to work with afero while retaining all the power -and benefit of the os and ioutil packages. - -Afero provides significant improvements over using the os package alone, most -notably the ability to create mock and testing filesystems without relying on the disk. - -It is suitable for use in any situation where you would consider using the OS -package as it provides an additional abstraction that makes it easy to use a -memory backed file system during testing. It also adds support for the http -filesystem for full interoperability. 
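-
-For a minimal sketch of that idea (the `save` helper is illustrative; `WriteFile`
-and `ReadFile` are afero utilities listed below), code written against the
-`afero.Fs` interface runs unchanged on any backend:
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"github.com/spf13/afero"
-)
-
-// save works against any afero.Fs, real or in-memory.
-func save(fs afero.Fs, name string, data []byte) error {
-	return afero.WriteFile(fs, name, data, 0644)
-}
-
-func main() {
-	fs := afero.NewMemMapFs() // swap in afero.NewOsFs() for real disk access
-	if err := save(fs, "/tmp/foo.txt", []byte("hello")); err != nil {
-		panic(err)
-	}
-	b, _ := afero.ReadFile(fs, "/tmp/foo.txt")
-	fmt.Println(string(b)) // "hello"
-}
-```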
-
-## Afero Features
-
-* A single consistent API for accessing a variety of filesystems
-* Interoperation between a variety of file system types
-* A set of interfaces to encourage and enforce interoperability between backends
-* An atomic, cross-platform, memory-backed file system
-* Support for compositional (union) file systems by combining multiple file systems acting as one
-* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
-* A set of utility functions ported from io, ioutil & hugo to be afero aware
-* A wrapper for the Go 1.16 filesystem abstraction `io/fs.FS`
-
-# Using Afero
-
-Afero is easy to use and easier to adopt.
-
-A few different ways you could use Afero:
-
-* Use the interfaces alone to define your own file system.
-* Use it as a wrapper for the OS packages.
-* Define different filesystems for different parts of your application.
-* Use Afero for mock filesystems while testing.
-
-## Step 1: Install Afero
-
-First use go get to install the latest version of the library.
-
-    $ go get github.com/spf13/afero
-
-Next include Afero in your application.
-```go
-import "github.com/spf13/afero"
-```
-
-## Step 2: Declare a backend
-
-First define a package variable and set it to a pointer to a filesystem.
-```go
-var AppFs = afero.NewMemMapFs()
-```
-or
-```go
-var AppFs = afero.NewOsFs()
-```
-It is important to note that if you repeat the composite literal you
-will be using a completely new and isolated filesystem. In the case of
-OsFs it will still use the same underlying filesystem, but it will reduce
-the ability to drop in other filesystems as desired.
-
-## Step 3: Use it like you would the OS package
-
-Throughout your application use any function and method like you normally
-would.
-
-So if your application previously had:
-```go
-os.Open("/tmp/foo")
-```
-you would replace it with:
-```go
-AppFs.Open("/tmp/foo")
-```
-
-`AppFs` is the variable we defined above.
-
-## List of all available functions
-
-File System Methods Available:
-```go
-Chmod(name string, mode os.FileMode) : error
-Chown(name string, uid, gid int) : error
-Chtimes(name string, atime time.Time, mtime time.Time) : error
-Create(name string) : File, error
-Mkdir(name string, perm os.FileMode) : error
-MkdirAll(path string, perm os.FileMode) : error
-Name() : string
-Open(name string) : File, error
-OpenFile(name string, flag int, perm os.FileMode) : File, error
-Remove(name string) : error
-RemoveAll(path string) : error
-Rename(oldname, newname string) : error
-Stat(name string) : os.FileInfo, error
-```
-File Interfaces and Methods Available:
-```go
-io.Closer
-io.Reader
-io.ReaderAt
-io.Seeker
-io.Writer
-io.WriterAt
-
-Name() : string
-Readdir(count int) : []os.FileInfo, error
-Readdirnames(n int) : []string, error
-Stat() : os.FileInfo, error
-Sync() : error
-Truncate(size int64) : error
-WriteString(s string) : ret int, err error
-```
-In some applications it may make sense to define a new package that
-simply exports the file system variable for easy access from anywhere.
-
-## Using Afero's utility functions
-
-Afero provides a set of functions to make it easier to use the underlying file systems.
-These functions have been primarily ported from io & ioutil, with some developed for Hugo.
-
-The afero utilities support all afero-compatible backends.
-
-The list of utilities includes:
-
-```go
-DirExists(path string) (bool, error)
-Exists(path string) (bool, error)
-FileContainsBytes(filename string, subslice []byte) (bool, error)
-GetTempDir(subPath string) string
-IsDir(path string) (bool, error)
-IsEmpty(path string) (bool, error)
-ReadDir(dirname string) ([]os.FileInfo, error)
-ReadFile(filename string) ([]byte, error)
-SafeWriteReader(path string, r io.Reader) (err error)
-TempDir(dir, prefix string) (name string, err error)
-TempFile(dir, prefix string) (f File, err error)
-Walk(root string, walkFn filepath.WalkFunc) error
-WriteFile(filename string, data []byte, perm os.FileMode) error
-WriteReader(path string, r io.Reader) (err error)
-```
-For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero).
-
-They can be used in two different ways. You can either call
-them directly, where the first parameter of each function is the file
-system, or you can declare a new `Afero`, a custom type used to bind these
-functions as methods to a given filesystem.
-
-### Calling utilities directly
-
-```go
-fs := new(afero.MemMapFs)
-f, err := afero.TempFile(fs, "", "ioutil-test")
-```
-
-### Calling via Afero
-
-```go
-fs := afero.NewMemMapFs()
-afs := &afero.Afero{Fs: fs}
-f, err := afs.TempFile("", "ioutil-test")
-```
-
-## Using Afero for Testing
-
-There is a large benefit to using a mock filesystem for testing. It has a
-completely blank state every time it is initialized and is easily
-reproducible regardless of OS. You could create files to your heart's content,
-and file access would be fast while also saving you from all the annoying
-issues with deleting temporary files, Windows file locking, etc. The MemMapFs
-backend is perfect for testing.
-
-* Much faster than performing I/O operations on disk
-* Avoids security and permission issues
-* Far more control. 'rm -rf /' with confidence
-* Test setup is far easier to do
-* No test cleanup needed
-
-One way to accomplish this is to define a variable as mentioned above.
-In your application this will be set to afero.NewOsFs(); during testing you
-can set it to afero.NewMemMapFs().
-
-It wouldn't be uncommon to have each test initialize a blank-slate memory
-backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
-appropriate in my application code. This approach ensures that tests are order
-independent, with no test relying on the state left by an earlier test.
-
-Then in my tests I would initialize a new MemMapFs for each test:
-```go
-func TestExist(t *testing.T) {
-	appFS := afero.NewMemMapFs()
-	// create test files and directories
-	appFS.MkdirAll("src/a", 0755)
-	afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
-	afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
-	name := "src/c"
-	_, err := appFS.Stat(name)
-	if os.IsNotExist(err) {
-		t.Errorf("file \"%s\" does not exist.\n", name)
-	}
-}
-```
-
-# Available Backends
-
-## Operating System Native
-
-### OsFs
-
-The first is simply a wrapper around the native OS calls. This makes it
-very easy to use, as all of the calls are the same as the existing OS
-calls. It also makes it trivial to have your code use the OS during
-operation and a mock filesystem during testing or as needed.
-
-```go
-appfs := afero.NewOsFs()
-appfs.MkdirAll("src/a", 0755)
-```
-
-## Memory Backed Storage
-
-### MemMapFs
-
-Afero also provides a fully atomic, memory-backed filesystem perfect for use in
-mocking and for avoiding unnecessary disk I/O when persistence isn't
-necessary. It is fully concurrent and safe to use from multiple goroutines.
-
-```go
-mm := afero.NewMemMapFs()
-mm.MkdirAll("src/a", 0755)
-```
-
-#### InMemoryFile
-
-As part of MemMapFs, Afero also provides an atomic, fully concurrent,
-memory-backed file implementation. This can be used in other memory-backed
-file systems with ease. Plans are to add a radix-tree, memory-stored file
-system using InMemoryFile.
-
-## Network Interfaces
-
-### SftpFs
-
-Afero has experimental support for the secure file transfer protocol (SFTP),
-which can be used to perform file operations over an encrypted channel.
-
-### GCSFs
-
-Afero has experimental support for Google Cloud Storage (GCS). You can either set the
-`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in
-`NewGcsFS` to configure access to your GCS bucket.
-
-Some known limitations of the existing implementation:
-* No Chmod support - The GCS ACL could probably be mapped to *nix-style permissions, but that would add another level of complexity and is ignored in this version.
-* No Chtimes support - Could be simulated with attributes (GCS a/m-times are set implicitly), but that is left for another version.
-* Not thread safe - It also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent.
-
-## Filtering Backends
-
-### BasePathFs
-
-The BasePathFs restricts all operations to a given path within an Fs.
-File names given to operations on this Fs are prefixed with the base
-path before the source Fs is called.
-
-```go
-bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
-```
-
-### ReadOnlyFs
-
-A thin wrapper around the source Fs providing a read-only view.
-
-```go
-fs := afero.NewReadOnlyFs(afero.NewOsFs())
-_, err := fs.Create("/file.txt")
-// err = syscall.EPERM
-```
-
-### RegexpFs
-
-A filtered view on file names; any file NOT matching
-the passed regexp will be treated as non-existing.
-Files not matching the regexp provided will not be created.
-Directories are not filtered.
-
-```go
-fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
-_, err := fs.Create("/file.html")
-// err = syscall.ENOENT
-```
-
-### HttpFs
-
-Afero provides an http-compatible backend which can wrap any of the existing
-backends.
-
-The net/http package requires a slightly different version of Open, one that
-returns an http.File type.
-
-Afero provides an HttpFs file system which satisfies this requirement.
-Any Afero FileSystem can be used as an HttpFs.
-
-```go
-httpFs := afero.NewHttpFs(afero.NewOsFs())
-fileserver := http.FileServer(httpFs.Dir("/static"))
-http.Handle("/", fileserver)
-```
-
-## Composite Backends
-
-Afero provides the ability to have two (or more) filesystems act as a single
-file system.
-
-### CacheOnReadFs
-
-The CacheOnReadFs will lazily make copies of any accessed files from the base
-layer into the overlay. Subsequent reads will be pulled from the overlay
-directly, provided the request is within the cache duration of when it was
-created in the overlay.
-
-If the base filesystem is writeable, any changes to files will be
-done first to the base, then to the overlay layer.
-Write calls on open file handles like `Write()` or `Truncate()` go to the
-overlay first.
-
-To write files to the overlay only, you can use the overlay Fs directly (not
-via the union Fs).
-
-Files are cached in the layer for the given time.Duration; a cache duration of 0
-means "forever", meaning the file will never be re-requested from the base.
-
-A read-only base will make the overlay also read-only, but it will still copy
-files from the base to the overlay when they're not present (or outdated) in
-the caching layer.
-
-```go
-base := afero.NewOsFs()
-layer := afero.NewMemMapFs()
-ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
-```
-
-### CopyOnWriteFs
-
-The CopyOnWriteFs is a read-only base file system with a potentially
-writeable layer on top.
-
-Read operations will first look in the overlay and, if not found there, will
-serve the file from the base.
-
-Changes to the file system will only be made in the overlay.
-
-Any attempt to modify a file found only in the base will copy the file to the
-overlay layer before modification (including opening a file with a writable
-handle).
-
-Removing and renaming files present only in the base layer is not currently
-permitted. If a file is present in the base layer and the overlay, only the
-overlay will be removed/renamed.
-
-```go
-	base := afero.NewOsFs()
-	roBase := afero.NewReadOnlyFs(base)
-	ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
-
-	fh, _ := ufs.Create("/home/test/file2.txt")
-	fh.WriteString("This is a test")
-	fh.Close()
-```
-
-In this example all write operations will only occur in memory (MemMapFs),
-leaving the base filesystem (OsFs) untouched.
-
-## Desired/possible backends
-
-The following is a short list of possible backends we hope someone will
-implement:
-
-* SSH
-* S3
-
-# About the project
-
-## What's in the name
-
-Afero comes from the Latin roots Ad-Facere.
-
-**"Ad"** is a prefix meaning "to".
-
-**"Facere"** is a form of the root "faciō", meaning "to make or do".
-
-The literal meaning of afero is "to make" or "to do", which seems very fitting
-for a library that allows one to make files and directories and do things with them.
-
-The English word that shares the same roots as Afero is "affair". Affair shares
-the same concept, but as a noun it means "something that is made or done" or "an
-object of a particular type".
-
-It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
-Googles very well.
-
-## Release Notes
-
-See the [Releases Page](https://github.com/spf13/afero/releases).
-
-## Contributing
-
-1. Fork it
-2. Create your feature branch (`git checkout -b my-new-feature`)
-3. Commit your changes (`git commit -am 'Add some feature'`)
-4. Push to the branch (`git push origin my-new-feature`)
-5. Create a new Pull Request
-
-## Contributors
-
-Names in no particular order:
-
-* [spf13](https://github.com/spf13)
-* [jaqx0r](https://github.com/jaqx0r)
-* [mbertschler](https://github.com/mbertschler)
-* [xor-gate](https://github.com/xor-gate)
-
-## License
-
-Afero is released under the Apache 2.0 license. See
-[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt).
diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go
deleted file mode 100644
index 39f65852..00000000
--- a/vendor/github.com/spf13/afero/afero.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright © 2014 Steve Francia .
-// Copyright 2013 tsuru authors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package afero provides types and methods for interacting with the filesystem,
-// as an abstraction layer.
-
-// Afero also provides a few implementations that are mostly interoperable: one
-// that uses the operating system filesystem, one that uses memory to store files
-// (cross-platform), and an interface that should be implemented if you want to
-// provide your own filesystem.
-
-package afero
-
-import (
-	"errors"
-	"io"
-	"os"
-	"time"
-)
-
-type Afero struct {
-	Fs
-}
-
-// File represents a file in the filesystem.
-type File interface {
-	io.Closer
-	io.Reader
-	io.ReaderAt
-	io.Seeker
-	io.Writer
-	io.WriterAt
-
-	Name() string
-	Readdir(count int) ([]os.FileInfo, error)
-	Readdirnames(n int) ([]string, error)
-	Stat() (os.FileInfo, error)
-	Sync() error
-	Truncate(size int64) error
-	WriteString(s string) (ret int, err error)
-}
-
-// Fs is the filesystem interface.
-//
-// Any simulated or real filesystem should implement this interface.
-type Fs interface {
-	// Create creates a file in the filesystem, returning the file and an
-	// error, if any happens.
-	Create(name string) (File, error)
-
-	// Mkdir creates a directory in the filesystem, returning an error if any
-	// happens.
-	Mkdir(name string, perm os.FileMode) error
-
-	// MkdirAll creates a directory path and all parents that do not exist
-	// yet.
-	MkdirAll(path string, perm os.FileMode) error
-
-	// Open opens a file, returning it or an error, if any happens.
-	Open(name string) (File, error)
-
-	// OpenFile opens a file using the given flags and the given mode.
-	OpenFile(name string, flag int, perm os.FileMode) (File, error)
-
-	// Remove removes a file identified by name, returning an error, if any
-	// happens.
-	Remove(name string) error
-
-	// RemoveAll removes a directory path and any children it contains. It
-	// does not fail if the path does not exist (returns nil).
-	RemoveAll(path string) error
-
-	// Rename renames a file.
-	Rename(oldname, newname string) error
-
-	// Stat returns a FileInfo describing the named file, or an error, if any
-	// happens.
-	Stat(name string) (os.FileInfo, error)
-
-	// Name returns the name of this FileSystem.
-	Name() string
-
-	// Chmod changes the mode of the named file to mode.
-	Chmod(name string, mode os.FileMode) error
-
-	// Chown changes the uid and gid of the named file.
-	Chown(name string, uid, gid int) error
-
-	// Chtimes changes the access and modification times of the named file.
-	Chtimes(name string, atime time.Time, mtime time.Time) error
-}
-
-var (
-	ErrFileClosed        = errors.New("File is closed")
-	ErrOutOfRange        = errors.New("out of range")
-	ErrTooLarge          = errors.New("too large")
-	ErrFileNotFound      = os.ErrNotExist
-	ErrFileExists        = os.ErrExist
-	ErrDestinationExists = os.ErrExist
-)
diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml
deleted file mode 100644
index 65e20e8c..00000000
--- a/vendor/github.com/spf13/afero/appveyor.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-# This currently does nothing. We have moved to GitHub Actions, but this is kept
-# until spf13 has disabled this project in AppVeyor.
-version: '{build}'
-clone_folder: C:\gopath\src\github.com\spf13\afero
-environment:
-  GOPATH: C:\gopath
-build_script:
-- cmd: >-
-    go version
-
diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go
deleted file mode 100644
index 2e72793a..00000000
--- a/vendor/github.com/spf13/afero/basepath.go
+++ /dev/null
@@ -1,222 +0,0 @@
-package afero
-
-import (
-	"io/fs"
-	"os"
-	"path/filepath"
-	"runtime"
-	"strings"
-	"time"
-)
-
-var (
-	_ Lstater        = (*BasePathFs)(nil)
-	_ fs.ReadDirFile = (*BasePathFile)(nil)
-)
-
-// The BasePathFs restricts all operations to a given path within an Fs.
-// File names given to operations on this Fs are prefixed with the base
-// path before the base Fs is called.
-// Any file name (after filepath.Clean()) outside this base path will be
-// treated as a non-existing file.
-//
-// Note that it does not clean the error messages on return, so you may
-// reveal the real path on errors.
-type BasePathFs struct {
-	source Fs
-	path   string
-}
-
-type BasePathFile struct {
-	File
-	path string
-}
-
-func (f *BasePathFile) Name() string {
-	sourcename := f.File.Name()
-	return strings.TrimPrefix(sourcename, filepath.Clean(f.path))
-}
-
-func (f *BasePathFile) ReadDir(n int) ([]fs.DirEntry, error) {
-	if rdf, ok := f.File.(fs.ReadDirFile); ok {
-		return rdf.ReadDir(n)
-	}
-	return readDirFile{f.File}.ReadDir(n)
-}
-
-func NewBasePathFs(source Fs, path string) Fs {
-	return &BasePathFs{source: source, path: path}
-}
-
-// RealPath, for a file outside the base path, returns the given file name and
-// an error; otherwise it returns the given file name with the base path prepended.
-func (b *BasePathFs) RealPath(name string) (path string, err error) {
-	if err := validateBasePathName(name); err != nil {
-		return name, err
-	}
-
-	bpath := filepath.Clean(b.path)
-	path = filepath.Clean(filepath.Join(bpath, name))
-	if !strings.HasPrefix(path, bpath) {
-		return name, os.ErrNotExist
-	}
-
-	return path, nil
-}
-
-func validateBasePathName(name string) error {
-	if runtime.GOOS != "windows" {
-		// Not much to do here;
-		// the virtual file paths all look absolute on *nix.
-		return nil
-	}
-
-	// On Windows a common mistake would be to provide an absolute OS path.
-	// We could strip out the base part, but that would not be very portable.
- if filepath.IsAbs(name) { - return os.ErrNotExist - } - - return nil -} - -func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chtimes", Path: name, Err: err} - } - return b.source.Chtimes(name, atime, mtime) -} - -func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chmod", Path: name, Err: err} - } - return b.source.Chmod(name, mode) -} - -func (b *BasePathFs) Chown(name string, uid, gid int) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chown", Path: name, Err: err} - } - return b.source.Chown(name, uid, gid) -} - -func (b *BasePathFs) Name() string { - return "BasePathFs" -} - -func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "stat", Path: name, Err: err} - } - return b.source.Stat(name) -} - -func (b *BasePathFs) Rename(oldname, newname string) (err error) { - if oldname, err = b.RealPath(oldname); err != nil { - return &os.PathError{Op: "rename", Path: oldname, Err: err} - } - if newname, err = b.RealPath(newname); err != nil { - return &os.PathError{Op: "rename", Path: newname, Err: err} - } - return b.source.Rename(oldname, newname) -} - -func (b *BasePathFs) RemoveAll(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove_all", Path: name, Err: err} - } - return b.source.RemoveAll(name) -} - -func (b *BasePathFs) Remove(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - return b.source.Remove(name) -} - -func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "openfile", Path: name, Err: err} - } - sourcef, err := b.source.OpenFile(name, flag, mode) - if err != nil { - return nil, err - } - return &BasePathFile{sourcef, b.path}, nil -} - -func (b *BasePathFs) Open(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "open", Path: name, Err: err} - } - sourcef, err := b.source.Open(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.Mkdir(name, mode) -} - -func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.MkdirAll(name, mode) -} - -func (b *BasePathFs) Create(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "create", Path: name, Err: err} - } - sourcef, err := b.source.Create(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - name, err := b.RealPath(name) - if err != nil { - return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} - } - if lstater, ok := b.source.(Lstater); ok { - return 
lstater.LstatIfPossible(name)
-	}
-	fi, err := b.source.Stat(name)
-	return fi, false, err
-}
-
-func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error {
-	oldname, err := b.RealPath(oldname)
-	if err != nil {
-		return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err}
-	}
-	newname, err = b.RealPath(newname)
-	if err != nil {
-		return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err}
-	}
-	if linker, ok := b.source.(Linker); ok {
-		return linker.SymlinkIfPossible(oldname, newname)
-	}
-	return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
-}
-
-func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) {
-	name, err := b.RealPath(name)
-	if err != nil {
-		return "", &os.PathError{Op: "readlink", Path: name, Err: err}
-	}
-	if reader, ok := b.source.(LinkReader); ok {
-		return reader.ReadlinkIfPossible(name)
-	}
-	return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
-}
diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go
deleted file mode 100644
index 017d344f..00000000
--- a/vendor/github.com/spf13/afero/cacheOnReadFs.go
+++ /dev/null
@@ -1,315 +0,0 @@
-package afero
-
-import (
-	"os"
-	"syscall"
-	"time"
-)
-
-// If the cache duration is 0, cache time will be unlimited, i.e. once
-// a file is in the layer, the base will never be read again for this file.
-//
-// For cache times greater than 0, the modification time of a file is
-// checked. Note that a lot of file system implementations only allow a
-// resolution of a second for timestamps... or as the godoc for os.Chtimes()
-// states: "The underlying filesystem may truncate or round the values to a
-// less precise time unit."
-//
-// This caching union will also forward all write calls to the base file
-// system first. To prevent writing to the base Fs, wrap it in a read-only
-// filter. Note: this will also make the overlay read-only; to write files
-// in the overlay, use the overlay Fs directly, not via the union Fs.
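-//
-// A minimal usage sketch (the constructor is defined below; the backends and
-// cache duration are illustrative):
-//
-//	base := afero.NewOsFs()      // slow but durable base
-//	layer := afero.NewMemMapFs() // fast in-memory cache
-//	ufs := afero.NewCacheOnReadFs(base, layer, time.Minute)
-//	f, err := ufs.Open("config.yml") // first read copies the file into the layer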
-type CacheOnReadFs struct { - base Fs - layer Fs - cacheTime time.Duration -} - -func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { - return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} -} - -type cacheState int - -const ( - // not present in the overlay, unknown if it exists in the base: - cacheMiss cacheState = iota - // present in the overlay and in base, base file is newer: - cacheStale - // present in the overlay - with cache time == 0 it may exist in the base, - // with cacheTime > 0 it exists in the base and is same age or newer in the - // overlay - cacheHit - // happens if someone writes directly to the overlay without - // going through this union - cacheLocal -) - -func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { - var lfi, bfi os.FileInfo - lfi, err = u.layer.Stat(name) - if err == nil { - if u.cacheTime == 0 { - return cacheHit, lfi, nil - } - if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { - bfi, err = u.base.Stat(name) - if err != nil { - return cacheLocal, lfi, nil - } - if bfi.ModTime().After(lfi.ModTime()) { - return cacheStale, bfi, nil - } - } - return cacheHit, lfi, nil - } - - if err == syscall.ENOENT || os.IsNotExist(err) { - return cacheMiss, nil, nil - } - - return cacheMiss, nil, err -} - -func (u *CacheOnReadFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CacheOnReadFs) copyFileToLayer(name string, flag int, perm os.FileMode) error { - return copyFileToLayer(u.base, u.layer, name, flag, perm) -} - -func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chtimes(name, atime, mtime) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chtimes(name, atime, mtime) - } - if err != nil { - return err - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chmod(name, mode) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chmod(name, mode) - } - if err != nil { - return err - } - return u.layer.Chmod(name, mode) -} - -func (u *CacheOnReadFs) Chown(name string, uid, gid int) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chown(name, uid, gid) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chown(name, uid, gid) - } - if err != nil { - return err - } - return u.layer.Chown(name, uid, gid) -} - -func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheMiss: - return u.base.Stat(name) - default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo - return fi, nil - } -} - -func (u *CacheOnReadFs) Rename(oldname, newname string) error { - st, _, err := u.cacheStatus(oldname) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Rename(oldname, newname) - case cacheStale, cacheMiss: - if err := u.copyToLayer(oldname); err != nil { - return err - 
} - err = u.base.Rename(oldname, newname) - } - if err != nil { - return err - } - return u.layer.Rename(oldname, newname) -} - -func (u *CacheOnReadFs) Remove(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.Remove(name) - } - if err != nil { - return err - } - return u.layer.Remove(name) -} - -func (u *CacheOnReadFs) RemoveAll(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.RemoveAll(name) - } - if err != nil { - return err - } - return u.layer.RemoveAll(name) -} - -func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - st, _, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheLocal, cacheHit: - default: - if err := u.copyFileToLayer(name, flag, perm); err != nil { - return nil, err - } - } - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - bfi, err := u.base.OpenFile(name, flag, perm) - if err != nil { - return nil, err - } - lfi, err := u.layer.OpenFile(name, flag, perm) - if err != nil { - bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? - return nil, err - } - return &UnionFile{Base: bfi, Layer: lfi}, nil - } - return u.layer.OpenFile(name, flag, perm) -} - -func (u *CacheOnReadFs) Open(name string) (File, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - - switch st { - case cacheLocal: - return u.layer.Open(name) - - case cacheMiss: - bfi, err := u.base.Stat(name) - if err != nil { - return nil, err - } - if bfi.IsDir() { - return u.base.Open(name) - } - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - - case cacheStale: - if !fi.IsDir() { - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - } - case cacheHit: - if !fi.IsDir() { - return u.layer.Open(name) - } - } - // the dirs from cacheHit, cacheStale fall down here: - bfile, _ := u.base.Open(name) - lfile, err := u.layer.Open(name) - if err != nil && bfile == nil { - return nil, err - } - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { - err := u.base.Mkdir(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache -} - -func (u *CacheOnReadFs) Name() string { - return "CacheOnReadFs" -} - -func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { - err := u.base.MkdirAll(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CacheOnReadFs) Create(name string) (File, error) { - bfh, err := u.base.Create(name) - if err != nil { - return nil, err - } - lfh, err := u.layer.Create(name) - if err != nil { - // oops, see comment about OS_TRUNC above, should we remove? then we have to - // remember if the file did not exist before - bfh.Close() - return nil, err - } - return &UnionFile{Base: bfh, Layer: lfh}, nil -} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go deleted file mode 100644 index eed0f225..00000000 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright © 2016 Steve Francia . 
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly
-// +build aix darwin openbsd freebsd netbsd dragonfly
-
-package afero
-
-import (
-	"syscall"
-)
-
-const BADFD = syscall.EBADF
diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go
deleted file mode 100644
index 004d57e2..00000000
--- a/vendor/github.com/spf13/afero/const_win_unix.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright © 2016 Steve Francia .
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix
-// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix
-
-package afero
-
-import (
-	"syscall"
-)
-
-const BADFD = syscall.EBADFD
diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go
deleted file mode 100644
index 184d6dd7..00000000
--- a/vendor/github.com/spf13/afero/copyOnWriteFs.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package afero
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"syscall"
-	"time"
-)
-
-var _ Lstater = (*CopyOnWriteFs)(nil)
-
-// The CopyOnWriteFs is a union filesystem: a read-only base file system with
-// a possibly writeable layer on top. Changes to the file system will only
-// be made in the overlay: changing an existing file in the base layer which
-// is not present in the overlay will copy the file to the overlay ("changing"
-// also includes calls to e.g. Chtimes(), Chmod() and Chown()).
-//
-// Reading directories is currently only supported via Open(), not OpenFile().
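-//
-// A minimal usage sketch, mirroring the README (the constructor is defined
-// below; the backends are illustrative):
-//
-//	base := afero.NewReadOnlyFs(afero.NewOsFs())
-//	ufs := afero.NewCopyOnWriteFs(base, afero.NewMemMapFs())
-//	f, err := ufs.Create("notes.txt") // created in the memory layer only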
-type CopyOnWriteFs struct { - base Fs - layer Fs -} - -func NewCopyOnWriteFs(base Fs, layer Fs) Fs { - return &CopyOnWriteFs{base: base, layer: layer} -} - -// Returns true if the file is not in the overlay -func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { - if _, err := u.layer.Stat(name); err == nil { - return false, nil - } - _, err := u.base.Stat(name) - if err != nil { - if oerr, ok := err.(*os.PathError); ok { - if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { - return false, nil - } - } - if err == syscall.ENOENT { - return false, nil - } - } - return true, err -} - -func (u *CopyOnWriteFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chmod(name, mode) -} - -func (u *CopyOnWriteFs) Chown(name string, uid, gid int) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chown(name, uid, gid) -} - -func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { - fi, err := u.layer.Stat(name) - if err != nil { - isNotExist := u.isNotExist(err) - if isNotExist { - return u.base.Stat(name) - } - return nil, err - } - return fi, nil -} - -func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - llayer, ok1 := u.layer.(Lstater) - lbase, ok2 := u.base.(Lstater) - - if ok1 { - fi, b, err := llayer.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - - if !u.isNotExist(err) { - return nil, b, err - } - } - - if ok2 { - fi, b, err := lbase.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - if !u.isNotExist(err) { - return nil, b, err - } - } - - fi, err := u.Stat(name) - - return fi, false, err -} - -func (u *CopyOnWriteFs) SymlinkIfPossible(oldname, newname string) error { - if slayer, ok := u.layer.(Linker); ok { - return slayer.SymlinkIfPossible(oldname, newname) - } - - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} -} - -func (u *CopyOnWriteFs) ReadlinkIfPossible(name string) (string, error) { - if rlayer, ok := u.layer.(LinkReader); ok { - return rlayer.ReadlinkIfPossible(name) - } - - if rbase, ok := u.base.(LinkReader); ok { - return rbase.ReadlinkIfPossible(name) - } - - return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} -} - -func (u *CopyOnWriteFs) isNotExist(err error) bool { - if e, ok := err.(*os.PathError); ok { - err = e.Err - } - if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { - return true - } - return false -} - -// Renaming files present only in the base layer is not permitted -func (u *CopyOnWriteFs) Rename(oldname, newname string) error { - b, err := u.isBaseFile(oldname) - if err != nil { - return err - } - if b { - return syscall.EPERM - } - return u.layer.Rename(oldname, newname) -} - -// Removing files present only in the base layer is not permitted. 
If -// a file is present in the base layer and the overlay, only the overlay -// will be removed. -func (u *CopyOnWriteFs) Remove(name string) error { - err := u.layer.Remove(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) RemoveAll(name string) error { - err := u.layer.RemoveAll(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - if b { - if err = u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - dir := filepath.Dir(name) - isaDir, err := IsDir(u.base, dir) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - if isaDir { - if err = u.layer.MkdirAll(dir, 0o777); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - isaDir, err = IsDir(u.layer, dir) - if err != nil { - return nil, err - } - if isaDir { - return u.layer.OpenFile(name, flag, perm) - } - - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? - } - if b { - return u.base.OpenFile(name, flag, perm) - } - return u.layer.OpenFile(name, flag, perm) -} - -// This function handles the 9 different possibilities caused -// by the union which are the intersection of the following... -// -// layer: doesn't exist, exists as a file, and exists as a directory -// base: doesn't exist, exists as a file, and exists as a directory -func (u *CopyOnWriteFs) Open(name string) (File, error) { - // Since the overlay overrides the base we check that first - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - // If overlay doesn't exist, return the base (base state irrelevant) - if b { - return u.base.Open(name) - } - - // If overlay is a file, return it (base state irrelevant) - dir, err := IsDir(u.layer, name) - if err != nil { - return nil, err - } - if !dir { - return u.layer.Open(name) - } - - // Overlay is a directory, base state now matters. - // Base state has 3 states to check but 2 outcomes: - // A. It's a file or non-readable in the base (return just the overlay) - // B. It's an accessible directory in the base (return a UnionFile) - - // If base is file or nonreadable, return overlay - dir, err = IsDir(u.base, name) - if !dir || err != nil { - return u.layer.Open(name) - } - - // Both base & layer are directories - // Return union file (if opens are without error) - bfile, bErr := u.base.Open(name) - lfile, lErr := u.layer.Open(name) - - // If either have errors at this point something is very wrong. 
Return nil and the errors - if bErr != nil || lErr != nil { - return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) - } - - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - return ErrFileExists - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Name() string { - return "CopyOnWriteFs" -} - -func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - // This is in line with how os.MkdirAll behaves. - return nil - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Create(name string) (File, error) { - return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0o666) -} diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go deleted file mode 100644 index ac0de6d5..00000000 --- a/vendor/github.com/spf13/afero/httpFs.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "errors" - "net/http" - "os" - "path" - "path/filepath" - "strings" - "time" -) - -type httpDir struct { - basePath string - fs HttpFs -} - -func (d httpDir) Open(name string) (http.File, error) { - if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) || - strings.Contains(name, "\x00") { - return nil, errors.New("http: invalid character in file path") - } - dir := string(d.basePath) - if dir == "" { - dir = "." 
- } - - f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) - if err != nil { - return nil, err - } - return f, nil -} - -type HttpFs struct { - source Fs -} - -func NewHttpFs(source Fs) *HttpFs { - return &HttpFs{source: source} -} - -func (h HttpFs) Dir(s string) *httpDir { - return &httpDir{basePath: s, fs: h} -} - -func (h HttpFs) Name() string { return "h HttpFs" } - -func (h HttpFs) Create(name string) (File, error) { - return h.source.Create(name) -} - -func (h HttpFs) Chmod(name string, mode os.FileMode) error { - return h.source.Chmod(name, mode) -} - -func (h HttpFs) Chown(name string, uid, gid int) error { - return h.source.Chown(name, uid, gid) -} - -func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return h.source.Chtimes(name, atime, mtime) -} - -func (h HttpFs) Mkdir(name string, perm os.FileMode) error { - return h.source.Mkdir(name, perm) -} - -func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { - return h.source.MkdirAll(path, perm) -} - -func (h HttpFs) Open(name string) (http.File, error) { - f, err := h.source.Open(name) - if err == nil { - if httpfile, ok := f.(http.File); ok { - return httpfile, nil - } - } - return nil, err -} - -func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return h.source.OpenFile(name, flag, perm) -} - -func (h HttpFs) Remove(name string) error { - return h.source.Remove(name) -} - -func (h HttpFs) RemoveAll(path string) error { - return h.source.RemoveAll(path) -} - -func (h HttpFs) Rename(oldname, newname string) error { - return h.source.Rename(oldname, newname) -} - -func (h HttpFs) Stat(name string) (os.FileInfo, error) { - return h.source.Stat(name) -} diff --git a/vendor/github.com/spf13/afero/internal/common/adapters.go b/vendor/github.com/spf13/afero/internal/common/adapters.go deleted file mode 100644 index 60685caa..00000000 --- a/vendor/github.com/spf13/afero/internal/common/adapters.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright © 2022 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package common - -import "io/fs" - -// FileInfoDirEntry provides an adapter from os.FileInfo to fs.DirEntry -type FileInfoDirEntry struct { - fs.FileInfo -} - -var _ fs.DirEntry = FileInfoDirEntry{} - -func (d FileInfoDirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() } - -func (d FileInfoDirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil } diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go deleted file mode 100644 index 938b9316..00000000 --- a/vendor/github.com/spf13/afero/iofs.go +++ /dev/null @@ -1,298 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -package afero - -import ( - "io" - "io/fs" - "os" - "path" - "sort" - "time" - - "github.com/spf13/afero/internal/common" -) - -// IOFS adopts afero.Fs to stdlib io/fs.FS -type IOFS struct { - Fs -} - -func NewIOFS(fs Fs) IOFS { - return IOFS{Fs: fs} -} - -var ( - _ fs.FS = IOFS{} - _ fs.GlobFS = IOFS{} - _ fs.ReadDirFS = IOFS{} - _ fs.ReadFileFS = IOFS{} - _ fs.StatFS = IOFS{} - _ fs.SubFS = IOFS{} -) - -func (iofs IOFS) Open(name string) (fs.File, error) { - const op = "open" - - // by convention for fs.FS implementations we should perform this check - if !fs.ValidPath(name) { - return nil, iofs.wrapError(op, name, fs.ErrInvalid) - } - - file, err := iofs.Fs.Open(name) - if err != nil { - return nil, iofs.wrapError(op, name, err) - } - - // file should implement fs.ReadDirFile - if _, ok := file.(fs.ReadDirFile); !ok { - file = readDirFile{file} - } - - return file, nil -} - -func (iofs IOFS) Glob(pattern string) ([]string, error) { - const op = "glob" - - // afero.Glob does not perform this check but it's required for implementations - if _, err := path.Match(pattern, ""); err != nil { - return nil, iofs.wrapError(op, pattern, err) - } - - items, err := Glob(iofs.Fs, pattern) - if err != nil { - return nil, iofs.wrapError(op, pattern, err) - } - - return items, nil -} - -func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) { - f, err := iofs.Fs.Open(name) - if err != nil { - return nil, iofs.wrapError("readdir", name, err) - } - - defer f.Close() - - if rdf, ok := f.(fs.ReadDirFile); ok { - items, err := rdf.ReadDir(-1) - if err != nil { - return nil, iofs.wrapError("readdir", name, err) - } - sort.Slice(items, func(i, j int) bool { return items[i].Name() < items[j].Name() }) - return items, nil - } - - items, err := f.Readdir(-1) - if err != nil { - return nil, iofs.wrapError("readdir", name, err) - } - sort.Sort(byName(items)) - - ret := make([]fs.DirEntry, len(items)) - for i := range items { - ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} - } - - return ret, nil -} - -func (iofs IOFS) ReadFile(name string) ([]byte, error) { - const op = "readfile" - - if !fs.ValidPath(name) { - return nil, iofs.wrapError(op, name, fs.ErrInvalid) - } - - bytes, err := ReadFile(iofs.Fs, name) - if err != nil { - return nil, iofs.wrapError(op, name, err) - } - - return bytes, nil -} - -func (iofs IOFS) Sub(dir string) (fs.FS, error) { return IOFS{NewBasePathFs(iofs.Fs, dir)}, nil } - -func (IOFS) wrapError(op, path string, err error) error { - if _, ok := err.(*fs.PathError); ok { - return err // don't need to wrap again - } - - return &fs.PathError{ - Op: op, - Path: path, - Err: err, - } -} - -// readDirFile provides adapter from afero.File to fs.ReadDirFile needed for correct Open -type readDirFile struct { - File -} - -var _ fs.ReadDirFile = readDirFile{} - -func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { - items, err := r.File.Readdir(n) - if err != nil { 
- return nil, err - } - - ret := make([]fs.DirEntry, len(items)) - for i := range items { - ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} - } - - return ret, nil -} - -// FromIOFS adopts io/fs.FS to use it as afero.Fs -// Note that io/fs.FS is read-only so all mutating methods will return fs.PathError with fs.ErrPermission -// To store modifications you may use afero.CopyOnWriteFs -type FromIOFS struct { - fs.FS -} - -var _ Fs = FromIOFS{} - -func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) } - -func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) } - -func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error { - return notImplemented("mkdirall", path) -} - -func (f FromIOFS) Open(name string) (File, error) { - file, err := f.FS.Open(name) - if err != nil { - return nil, err - } - - return fromIOFSFile{File: file, name: name}, nil -} - -func (f FromIOFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return f.Open(name) -} - -func (f FromIOFS) Remove(name string) error { - return notImplemented("remove", name) -} - -func (f FromIOFS) RemoveAll(path string) error { - return notImplemented("removeall", path) -} - -func (f FromIOFS) Rename(oldname, newname string) error { - return notImplemented("rename", oldname) -} - -func (f FromIOFS) Stat(name string) (os.FileInfo, error) { return fs.Stat(f.FS, name) } - -func (f FromIOFS) Name() string { return "fromiofs" } - -func (f FromIOFS) Chmod(name string, mode os.FileMode) error { - return notImplemented("chmod", name) -} - -func (f FromIOFS) Chown(name string, uid, gid int) error { - return notImplemented("chown", name) -} - -func (f FromIOFS) Chtimes(name string, atime time.Time, mtime time.Time) error { - return notImplemented("chtimes", name) -} - -type fromIOFSFile struct { - fs.File - name string -} - -func (f fromIOFSFile) ReadAt(p []byte, off int64) (n int, err error) { - readerAt, ok := f.File.(io.ReaderAt) - if !ok { - return -1, notImplemented("readat", f.name) - } - - return readerAt.ReadAt(p, off) -} - -func (f fromIOFSFile) Seek(offset int64, whence int) (int64, error) { - seeker, ok := f.File.(io.Seeker) - if !ok { - return -1, notImplemented("seek", f.name) - } - - return seeker.Seek(offset, whence) -} - -func (f fromIOFSFile) Write(p []byte) (n int, err error) { - return -1, notImplemented("write", f.name) -} - -func (f fromIOFSFile) WriteAt(p []byte, off int64) (n int, err error) { - return -1, notImplemented("writeat", f.name) -} - -func (f fromIOFSFile) Name() string { return f.name } - -func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) { - rdfile, ok := f.File.(fs.ReadDirFile) - if !ok { - return nil, notImplemented("readdir", f.name) - } - - entries, err := rdfile.ReadDir(count) - if err != nil { - return nil, err - } - - ret := make([]os.FileInfo, len(entries)) - for i := range entries { - ret[i], err = entries[i].Info() - - if err != nil { - return nil, err - } - } - - return ret, nil -} - -func (f fromIOFSFile) Readdirnames(n int) ([]string, error) { - rdfile, ok := f.File.(fs.ReadDirFile) - if !ok { - return nil, notImplemented("readdir", f.name) - } - - entries, err := rdfile.ReadDir(n) - if err != nil { - return nil, err - } - - ret := make([]string, len(entries)) - for i := range entries { - ret[i] = entries[i].Name() - } - - return ret, nil -} - -func (f fromIOFSFile) Sync() error { return nil } - -func (f fromIOFSFile) Truncate(size int64) error { - return 
notImplemented("truncate", f.name) -} - -func (f fromIOFSFile) WriteString(s string) (ret int, err error) { - return -1, notImplemented("writestring", f.name) -} - -func notImplemented(op, path string) error { - return &fs.PathError{Op: op, Path: path, Err: fs.ErrPermission} -} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go deleted file mode 100644 index fa6abe1e..00000000 --- a/vendor/github.com/spf13/afero/ioutil.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "sync" - "time" -) - -// byName implements sort.Interface. -type byName []os.FileInfo - -func (f byName) Len() int { return len(f) } -func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } -func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } - -// ReadDir reads the directory named by dirname and returns -// a list of sorted directory entries. -func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { - return ReadDir(a.Fs, dirname) -} - -func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - list, err := f.Readdir(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Sort(byName(list)) - return list, nil -} - -// ReadFile reads the file named by filename and returns the contents. -// A successful call returns err == nil, not err == EOF. Because ReadFile -// reads the whole file, it does not treat an EOF from Read as an error -// to be reported. -func (a Afero) ReadFile(filename string) ([]byte, error) { - return ReadFile(a.Fs, filename) -} - -func ReadFile(fs Fs, filename string) ([]byte, error) { - f, err := fs.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - // It's a good but not certain bet that FileInfo will tell us exactly how much to - // read, so let's try it but be prepared for the answer to be wrong. - var n int64 - - if fi, err := f.Stat(); err == nil { - // Don't preallocate a huge buffer, just in case. - if size := fi.Size(); size < 1e9 { - n = size - } - } - // As initial capacity for readAll, use n + a little extra in case Size is zero, - // and to avoid another allocation after Read has filled the buffer. The readAll - // call will read into its allocated internal buffer cheaply. If the size was - // wrong, we'll either waste some space off the end or reallocate as needed, but - // in the overwhelmingly common case we'll get it just right. - return readAll(f, n+bytes.MinRead) -} - -// readAll reads from r until an error or EOF and returns the data it read -// from the internal buffer allocated with a specified capacity. -func readAll(r io.Reader, capacity int64) (b []byte, err error) { - buf := bytes.NewBuffer(make([]byte, 0, capacity)) - // If the buffer overflows, we will get bytes.ErrTooLarge. 
- // Return that as an error. Any other panic remains. - defer func() { - e := recover() - if e == nil { - return - } - if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { - err = panicErr - } else { - panic(e) - } - }() - _, err = buf.ReadFrom(r) - return buf.Bytes(), err -} - -// ReadAll reads from r until an error or EOF and returns the data it read. -// A successful call returns err == nil, not err == EOF. Because ReadAll is -// defined to read from src until EOF, it does not treat an EOF from Read -// as an error to be reported. -func ReadAll(r io.Reader) ([]byte, error) { - return readAll(r, bytes.MinRead) -} - -// WriteFile writes data to a file named by filename. -// If the file does not exist, WriteFile creates it with permissions perm; -// otherwise WriteFile truncates it before writing. -func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { - return WriteFile(a.Fs, filename, data, perm) -} - -func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { - f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -// Random number state. -// We generate random temporary file names so that there's a good -// chance the file doesn't exist yet - keeps the number of tries in -// TempFile to a minimum. -var ( - randNum uint32 - randmu sync.Mutex -) - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextRandom() string { - randmu.Lock() - r := randNum - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - randNum = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFile creates a new temporary file in the directory dir, -// opens the file for reading and writing, and returns the resulting *os.File. -// The filename is generated by taking pattern and adding a random -// string to the end. If pattern includes a "*", the random string -// replaces the last "*". -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func (a Afero) TempFile(dir, pattern string) (f File, err error) { - return TempFile(a.Fs, dir, pattern) -} - -func TempFile(fs Fs, dir, pattern string) (f File, err error) { - if dir == "" { - dir = os.TempDir() - } - - var prefix, suffix string - if pos := strings.LastIndex(pattern, "*"); pos != -1 { - prefix, suffix = pattern[:pos], pattern[pos+1:] - } else { - prefix = pattern - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextRandom()+suffix) - f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - randNum = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} - -// TempDir creates a new temporary directory in the directory dir -// with a name beginning with prefix and returns the path of the -// new directory. If dir is the empty string, TempDir uses the -// default directory for temporary files (see os.TempDir). 
-// Multiple programs calling TempDir simultaneously -// will not choose the same directory. It is the caller's responsibility -// to remove the directory when no longer needed. -func (a Afero) TempDir(dir, prefix string) (name string, err error) { - return TempDir(a.Fs, dir, prefix) -} - -func TempDir(fs Fs, dir, prefix string) (name string, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - try := filepath.Join(dir, prefix+nextRandom()) - err = fs.Mkdir(try, 0o700) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - randNum = reseed() - randmu.Unlock() - } - continue - } - if err == nil { - name = try - } - break - } - return -} diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go deleted file mode 100644 index 89c1bfc0..00000000 --- a/vendor/github.com/spf13/afero/lstater.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright © 2018 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" -) - -// Lstater is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. -// Else it will call Stat. -// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. -type Lstater interface { - LstatIfPossible(name string) (os.FileInfo, bool, error) -} diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go deleted file mode 100644 index 7db4b7de..00000000 --- a/vendor/github.com/spf13/afero/match.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2009 The Go Authors. All rights reserved. - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "path/filepath" - "sort" - "strings" -) - -// Glob returns the names of all files matching pattern or nil -// if there is no matching file. The syntax of patterns is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). -// -// Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -// This was adapted from (http://golang.org/pkg/path/filepath) and uses several -// built-ins from that package. 
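
[Context for the Glob implementation removed below: a minimal usage sketch, illustrative only and not part of the vendored source. It assumes a module that still imports github.com/spf13/afero and uses the in-memory backend so it runs standalone.]

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fsys := afero.NewMemMapFs()
	// Seed a few files so the pattern has something to match.
	for _, name := range []string{"a.txt", "b.txt", "c.md"} {
		if err := afero.WriteFile(fsys, name, []byte("x"), 0o644); err != nil {
			panic(err)
		}
	}
	// Glob uses the same pattern syntax as path/filepath.Match,
	// but walks the afero filesystem instead of the host OS.
	matches, err := afero.Glob(fsys, "*.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(matches) // [a.txt b.txt]
}
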
-func Glob(fs Fs, pattern string) (matches []string, err error) { - if !hasMeta(pattern) { - // Lstat not supported by a ll filesystems. - if _, err = lstatIfPossible(fs, pattern); err != nil { - return nil, nil - } - return []string{pattern}, nil - } - - dir, file := filepath.Split(pattern) - switch dir { - case "": - dir = "." - case string(filepath.Separator): - // nothing - default: - dir = dir[0 : len(dir)-1] // chop off trailing separator - } - - if !hasMeta(dir) { - return glob(fs, dir, file, nil) - } - - var m []string - m, err = Glob(fs, dir) - if err != nil { - return - } - for _, d := range m { - matches, err = glob(fs, d, file, matches) - if err != nil { - return - } - } - return -} - -// glob searches for files matching pattern in the directory dir -// and appends them to matches. If the directory cannot be -// opened, it returns the existing matches. New matches are -// added in lexicographical order. -func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { - m = matches - fi, err := fs.Stat(dir) - if err != nil { - return - } - if !fi.IsDir() { - return - } - d, err := fs.Open(dir) - if err != nil { - return - } - defer d.Close() - - names, _ := d.Readdirnames(-1) - sort.Strings(names) - - for _, n := range names { - matched, err := filepath.Match(pattern, n) - if err != nil { - return m, err - } - if matched { - m = append(m, filepath.Join(dir, n)) - } - } - return -} - -// hasMeta reports whether path contains any of the magic characters -// recognized by Match. -func hasMeta(path string) bool { - // TODO(niemeyer): Should other magic characters be added here? - return strings.ContainsAny(path, "*?[") -} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go deleted file mode 100644 index e104013f..00000000 --- a/vendor/github.com/spf13/afero/mem/dir.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -type Dir interface { - Len() int - Names() []string - Files() []*FileData - Add(*FileData) - Remove(*FileData) -} - -func RemoveFromMemDir(dir *FileData, f *FileData) { - dir.memDir.Remove(f) -} - -func AddToMemDir(dir *FileData, f *FileData) { - dir.memDir.Add(f) -} - -func InitializeDir(d *FileData) { - if d.memDir == nil { - d.dir = true - d.memDir = &DirMap{} - } -} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go deleted file mode 100644 index 03a57ee5..00000000 --- a/vendor/github.com/spf13/afero/mem/dirmap.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -import "sort" - -type DirMap map[string]*FileData - -func (m DirMap) Len() int { return len(m) } -func (m DirMap) Add(f *FileData) { m[f.name] = f } -func (m DirMap) Remove(f *FileData) { delete(m, f.name) } -func (m DirMap) Files() (files []*FileData) { - for _, f := range m { - files = append(files, f) - } - sort.Sort(filesSorter(files)) - return files -} - -// implement sort.Interface for []*FileData -type filesSorter []*FileData - -func (s filesSorter) Len() int { return len(s) } -func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } - -func (m DirMap) Names() (names []string) { - for x := range m { - names = append(names, x) - } - return names -} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go deleted file mode 100644 index 62fe4498..00000000 --- a/vendor/github.com/spf13/afero/mem/file.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright © 2015 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mem - -import ( - "bytes" - "errors" - "io" - "io/fs" - "os" - "path/filepath" - "sync" - "sync/atomic" - "time" - - "github.com/spf13/afero/internal/common" -) - -const FilePathSeparator = string(filepath.Separator) - -var _ fs.ReadDirFile = &File{} - -type File struct { - // atomic requires 64-bit alignment for struct field access - at int64 - readDirCount int64 - closed bool - readOnly bool - fileData *FileData -} - -func NewFileHandle(data *FileData) *File { - return &File{fileData: data} -} - -func NewReadOnlyFileHandle(data *FileData) *File { - return &File{fileData: data, readOnly: true} -} - -func (f File) Data() *FileData { - return f.fileData -} - -type FileData struct { - sync.Mutex - name string - data []byte - memDir Dir - dir bool - mode os.FileMode - modtime time.Time - uid int - gid int -} - -func (d *FileData) Name() string { - d.Lock() - defer d.Unlock() - return d.name -} - -func CreateFile(name string) *FileData { - return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} -} - -func CreateDir(name string) *FileData { - return &FileData{name: name, memDir: &DirMap{}, dir: true, modtime: time.Now()} -} - -func ChangeFileName(f *FileData, newname string) { - f.Lock() - f.name = newname - f.Unlock() -} - -func SetMode(f *FileData, mode os.FileMode) { - f.Lock() - f.mode = mode - f.Unlock() -} - -func SetModTime(f *FileData, mtime time.Time) { - f.Lock() - setModTime(f, mtime) - f.Unlock() -} - -func setModTime(f *FileData, mtime time.Time) { - f.modtime = mtime -} - -func SetUID(f *FileData, uid int) { - f.Lock() - f.uid = uid - f.Unlock() -} - -func SetGID(f *FileData, gid int) { - f.Lock() - f.gid = gid - f.Unlock() -} - -func GetFileInfo(f *FileData) *FileInfo { - return &FileInfo{f} -} - -func (f *File) Open() error { - atomic.StoreInt64(&f.at, 0) - atomic.StoreInt64(&f.readDirCount, 0) - f.fileData.Lock() - f.closed = false - f.fileData.Unlock() - return nil -} - -func (f *File) Close() error { - f.fileData.Lock() - f.closed = true - if !f.readOnly { - setModTime(f.fileData, time.Now()) - } - f.fileData.Unlock() - return nil -} - -func (f *File) Name() string { - return f.fileData.Name() -} - -func (f *File) Stat() (os.FileInfo, error) { - return &FileInfo{f.fileData}, nil -} - -func (f *File) Sync() error { - return nil -} - -func (f *File) Readdir(count int) (res []os.FileInfo, err error) { - if !f.fileData.dir { - return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} - } - var outLength int64 - - f.fileData.Lock() - files := f.fileData.memDir.Files()[f.readDirCount:] - if count > 0 { - if len(files) < count { - outLength = int64(len(files)) - } else { - outLength = int64(count) - } - if len(files) == 0 { - err = io.EOF - } - } else { - outLength = int64(len(files)) - } - f.readDirCount += outLength - f.fileData.Unlock() - - res = make([]os.FileInfo, outLength) - for i := range res { - res[i] = &FileInfo{files[i]} - } - - return res, err -} - -func (f *File) Readdirnames(n int) (names []string, err error) { - fi, err := f.Readdir(n) - names = make([]string, len(fi)) - for i, f := range fi { - _, names[i] = filepath.Split(f.Name()) - } - return names, err -} - -// Implements fs.ReadDirFile -func (f *File) ReadDir(n int) ([]fs.DirEntry, error) { - fi, err := f.Readdir(n) - if err != nil { - return nil, err - } - di := make([]fs.DirEntry, len(fi)) - for i, f := range fi { - di[i] = common.FileInfoDirEntry{FileInfo: f} - } - return di, nil -} - -func (f *File) Read(b []byte) (n int, err error) { - 
f.fileData.Lock() - defer f.fileData.Unlock() - if f.closed { - return 0, ErrFileClosed - } - if len(b) > 0 && int(f.at) == len(f.fileData.data) { - return 0, io.EOF - } - if int(f.at) > len(f.fileData.data) { - return 0, io.ErrUnexpectedEOF - } - if len(f.fileData.data)-int(f.at) >= len(b) { - n = len(b) - } else { - n = len(f.fileData.data) - int(f.at) - } - copy(b, f.fileData.data[f.at:f.at+int64(n)]) - atomic.AddInt64(&f.at, int64(n)) - return -} - -func (f *File) ReadAt(b []byte, off int64) (n int, err error) { - prev := atomic.LoadInt64(&f.at) - atomic.StoreInt64(&f.at, off) - n, err = f.Read(b) - atomic.StoreInt64(&f.at, prev) - return -} - -func (f *File) Truncate(size int64) error { - if f.closed { - return ErrFileClosed - } - if f.readOnly { - return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - if size < 0 { - return ErrOutOfRange - } - f.fileData.Lock() - defer f.fileData.Unlock() - if size > int64(len(f.fileData.data)) { - diff := size - int64(len(f.fileData.data)) - f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{0o0}, int(diff))...) - } else { - f.fileData.data = f.fileData.data[0:size] - } - setModTime(f.fileData, time.Now()) - return nil -} - -func (f *File) Seek(offset int64, whence int) (int64, error) { - if f.closed { - return 0, ErrFileClosed - } - switch whence { - case io.SeekStart: - atomic.StoreInt64(&f.at, offset) - case io.SeekCurrent: - atomic.AddInt64(&f.at, offset) - case io.SeekEnd: - atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) - } - return f.at, nil -} - -func (f *File) Write(b []byte) (n int, err error) { - if f.closed { - return 0, ErrFileClosed - } - if f.readOnly { - return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - n = len(b) - cur := atomic.LoadInt64(&f.at) - f.fileData.Lock() - defer f.fileData.Unlock() - diff := cur - int64(len(f.fileData.data)) - var tail []byte - if n+int(cur) < len(f.fileData.data) { - tail = f.fileData.data[n+int(cur):] - } - if diff > 0 { - f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...) - f.fileData.data = append(f.fileData.data, tail...) - } else { - f.fileData.data = append(f.fileData.data[:cur], b...) - f.fileData.data = append(f.fileData.data, tail...) 
- } - setModTime(f.fileData, time.Now()) - - atomic.AddInt64(&f.at, int64(n)) - return -} - -func (f *File) WriteAt(b []byte, off int64) (n int, err error) { - atomic.StoreInt64(&f.at, off) - return f.Write(b) -} - -func (f *File) WriteString(s string) (ret int, err error) { - return f.Write([]byte(s)) -} - -func (f *File) Info() *FileInfo { - return &FileInfo{f.fileData} -} - -type FileInfo struct { - *FileData -} - -// Implements os.FileInfo -func (s *FileInfo) Name() string { - s.Lock() - _, name := filepath.Split(s.name) - s.Unlock() - return name -} - -func (s *FileInfo) Mode() os.FileMode { - s.Lock() - defer s.Unlock() - return s.mode -} - -func (s *FileInfo) ModTime() time.Time { - s.Lock() - defer s.Unlock() - return s.modtime -} - -func (s *FileInfo) IsDir() bool { - s.Lock() - defer s.Unlock() - return s.dir -} -func (s *FileInfo) Sys() interface{} { return nil } -func (s *FileInfo) Size() int64 { - if s.IsDir() { - return int64(42) - } - s.Lock() - defer s.Unlock() - return int64(len(s.data)) -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("out of range") - ErrTooLarge = errors.New("too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go deleted file mode 100644 index e6b7d70b..00000000 --- a/vendor/github.com/spf13/afero/memmap.go +++ /dev/null @@ -1,422 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "fmt" - "io" - "log" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/spf13/afero/mem" -) - -const chmodBits = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky // Only a subset of bits are allowed to be changed. Documented under os.Chmod() - -type MemMapFs struct { - mu sync.RWMutex - data map[string]*mem.FileData - init sync.Once -} - -func NewMemMapFs() Fs { - return &MemMapFs{} -} - -func (m *MemMapFs) getData() map[string]*mem.FileData { - m.init.Do(func() { - m.data = make(map[string]*mem.FileData) - // Root should always exist, right? - // TODO: what about windows? 
- root := mem.CreateDir(FilePathSeparator) - mem.SetMode(root, os.ModeDir|0o755) - m.data[FilePathSeparator] = root - }) - return m.data -} - -func (*MemMapFs) Name() string { return "MemMapFS" } - -func (m *MemMapFs) Create(name string) (File, error) { - name = normalizePath(name) - m.mu.Lock() - file := mem.CreateFile(name) - m.getData()[name] = file - m.registerWithParent(file, 0) - m.mu.Unlock() - return mem.NewFileHandle(file), nil -} - -func (m *MemMapFs) unRegisterWithParent(fileName string) error { - f, err := m.lockfreeOpen(fileName) - if err != nil { - return err - } - parent := m.findParent(f) - if parent == nil { - log.Panic("parent of ", f.Name(), " is nil") - } - - parent.Lock() - mem.RemoveFromMemDir(parent, f) - parent.Unlock() - return nil -} - -func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { - pdir, _ := filepath.Split(f.Name()) - pdir = filepath.Clean(pdir) - pfile, err := m.lockfreeOpen(pdir) - if err != nil { - return nil - } - return pfile -} - -func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) { - if f == nil { - return - } - parent := m.findParent(f) - if parent == nil { - pdir := filepath.Dir(filepath.Clean(f.Name())) - err := m.lockfreeMkdir(pdir, perm) - if err != nil { - // log.Println("Mkdir error:", err) - return - } - parent, err = m.lockfreeOpen(pdir) - if err != nil { - // log.Println("Open after Mkdir error:", err) - return - } - } - - parent.Lock() - mem.InitializeDir(parent) - mem.AddToMemDir(parent, f) - parent.Unlock() -} - -func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { - name = normalizePath(name) - x, ok := m.getData()[name] - if ok { - // Only return ErrFileExists if it's a file, not a directory. - i := mem.FileInfo{FileData: x} - if !i.IsDir() { - return ErrFileExists - } - } else { - item := mem.CreateDir(name) - mem.SetMode(item, os.ModeDir|perm) - m.getData()[name] = item - m.registerWithParent(item, perm) - } - return nil -} - -func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { - perm &= chmodBits - name = normalizePath(name) - - m.mu.RLock() - _, ok := m.getData()[name] - m.mu.RUnlock() - if ok { - return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} - } - - m.mu.Lock() - // Dobule check that it doesn't exist. 
- if _, ok := m.getData()[name]; ok { - m.mu.Unlock() - return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} - } - item := mem.CreateDir(name) - mem.SetMode(item, os.ModeDir|perm) - m.getData()[name] = item - m.registerWithParent(item, perm) - m.mu.Unlock() - - return m.setFileMode(name, perm|os.ModeDir) -} - -func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { - err := m.Mkdir(path, perm) - if err != nil { - if err.(*os.PathError).Err == ErrFileExists { - return nil - } - return err - } - return nil -} - -// Handle some relative paths -func normalizePath(path string) string { - path = filepath.Clean(path) - - switch path { - case ".": - return FilePathSeparator - case "..": - return FilePathSeparator - default: - return path - } -} - -func (m *MemMapFs) Open(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewReadOnlyFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) openWrite(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) open(name string) (*mem.FileData, error) { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} - } - return f, nil -} - -func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { - name = normalizePath(name) - f, ok := m.getData()[name] - if ok { - return f, nil - } else { - return nil, ErrFileNotFound - } -} - -func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - perm &= chmodBits - chmod := false - file, err := m.openWrite(name) - if err == nil && (flag&os.O_EXCL > 0) { - return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileExists} - } - if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { - file, err = m.Create(name) - chmod = true - } - if err != nil { - return nil, err - } - if flag == os.O_RDONLY { - file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) - } - if flag&os.O_APPEND > 0 { - _, err = file.Seek(0, io.SeekEnd) - if err != nil { - file.Close() - return nil, err - } - } - if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { - err = file.Truncate(0) - if err != nil { - file.Close() - return nil, err - } - } - if chmod { - return file, m.setFileMode(name, perm) - } - return file, nil -} - -func (m *MemMapFs) Remove(name string) error { - name = normalizePath(name) - - m.mu.Lock() - defer m.mu.Unlock() - - if _, ok := m.getData()[name]; ok { - err := m.unRegisterWithParent(name) - if err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - delete(m.getData(), name) - } else { - return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} - } - return nil -} - -func (m *MemMapFs) RemoveAll(path string) error { - path = normalizePath(path) - m.mu.Lock() - m.unRegisterWithParent(path) - m.mu.Unlock() - - m.mu.RLock() - defer m.mu.RUnlock() - - for p := range m.getData() { - if p == path || strings.HasPrefix(p, path+FilePathSeparator) { - m.mu.RUnlock() - m.mu.Lock() - delete(m.getData(), p) - m.mu.Unlock() - m.mu.RLock() - } - } - return nil -} - -func (m *MemMapFs) Rename(oldname, newname string) error { - oldname = normalizePath(oldname) - newname = normalizePath(newname) - - if oldname == newname { - return nil - } - - m.mu.RLock() - defer m.mu.RUnlock() - if _, ok := m.getData()[oldname]; ok { - m.mu.RUnlock() - m.mu.Lock() - m.unRegisterWithParent(oldname) - 
fileData := m.getData()[oldname] - delete(m.getData(), oldname) - mem.ChangeFileName(fileData, newname) - m.getData()[newname] = fileData - m.registerWithParent(fileData, 0) - m.mu.Unlock() - m.mu.RLock() - } else { - return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} - } - - for p, fileData := range m.getData() { - if strings.HasPrefix(p, oldname+FilePathSeparator) { - m.mu.RUnlock() - m.mu.Lock() - delete(m.getData(), p) - p := strings.Replace(p, oldname, newname, 1) - m.getData()[p] = fileData - m.mu.Unlock() - m.mu.RLock() - } - } - return nil -} - -func (m *MemMapFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - fileInfo, err := m.Stat(name) - return fileInfo, false, err -} - -func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { - f, err := m.Open(name) - if err != nil { - return nil, err - } - fi := mem.GetFileInfo(f.(*mem.File).Data()) - return fi, nil -} - -func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { - mode &= chmodBits - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} - } - prevOtherBits := mem.GetFileInfo(f).Mode() & ^chmodBits - - mode = prevOtherBits | mode - return m.setFileMode(name, mode) -} - -func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetMode(f, mode) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) Chown(name string, uid, gid int) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chown", Path: name, Err: ErrFileNotFound} - } - - mem.SetUID(f, uid) - mem.SetGID(f, gid) - - return nil -} - -func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetModTime(f, mtime) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) List() { - for _, x := range m.data { - y := mem.FileInfo{FileData: x} - fmt.Println(x.Name(), y.Size()) - } -} diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go deleted file mode 100644 index f1366321..00000000 --- a/vendor/github.com/spf13/afero/os.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "time" -) - -var _ Lstater = (*OsFs)(nil) - -// OsFs is a Fs implementation that uses functions provided by the os package. -// -// For details in any method, check the documentation of the os package -// (http://golang.org/pkg/os/). 
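
[Context for the OsFs type removed below: the main pattern it enables is writing code against the afero.Fs interface so the backend can be swapped. A hedged sketch; countEntries is a name invented for illustration, not from the vendored source.]

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

// countEntries works against any afero.Fs, so production code can pass
// afero.NewOsFs() while tests pass afero.NewMemMapFs().
func countEntries(fsys afero.Fs, dir string) (int, error) {
	infos, err := afero.ReadDir(fsys, dir)
	if err != nil {
		return 0, err
	}
	return len(infos), nil
}

func main() {
	fsys := afero.NewMemMapFs()
	_ = afero.WriteFile(fsys, "/a.txt", []byte("x"), 0o644)
	n, err := countEntries(fsys, "/")
	fmt.Println(n, err) // 1 <nil>
}
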
-type OsFs struct{} - -func NewOsFs() Fs { - return &OsFs{} -} - -func (OsFs) Name() string { return "OsFs" } - -func (OsFs) Create(name string) (File, error) { - f, e := os.Create(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Mkdir(name string, perm os.FileMode) error { - return os.Mkdir(name, perm) -} - -func (OsFs) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -func (OsFs) Open(name string) (File, error) { - f, e := os.Open(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - f, e := os.OpenFile(name, flag, perm) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Remove(name string) error { - return os.Remove(name) -} - -func (OsFs) RemoveAll(path string) error { - return os.RemoveAll(path) -} - -func (OsFs) Rename(oldname, newname string) error { - return os.Rename(oldname, newname) -} - -func (OsFs) Stat(name string) (os.FileInfo, error) { - return os.Stat(name) -} - -func (OsFs) Chmod(name string, mode os.FileMode) error { - return os.Chmod(name, mode) -} - -func (OsFs) Chown(name string, uid, gid int) error { - return os.Chown(name, uid, gid) -} - -func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return os.Chtimes(name, atime, mtime) -} - -func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - fi, err := os.Lstat(name) - return fi, true, err -} - -func (OsFs) SymlinkIfPossible(oldname, newname string) error { - return os.Symlink(oldname, newname) -} - -func (OsFs) ReadlinkIfPossible(name string) (string, error) { - return os.Readlink(name) -} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go deleted file mode 100644 index 18f60a0f..00000000 --- a/vendor/github.com/spf13/afero/path.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "path/filepath" - "sort" -) - -// readDirNames reads the directory named by dirname and returns -// a sorted list of directory entries. 
-// adapted from https://golang.org/src/path/filepath/path.go -func readDirNames(fs Fs, dirname string) ([]string, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - names, err := f.Readdirnames(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - -// walk recursively descends path, calling walkFn -// adapted from https://golang.org/src/path/filepath/path.go -func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { - err := walkFn(path, info, nil) - if err != nil { - if info.IsDir() && err == filepath.SkipDir { - return nil - } - return err - } - - if !info.IsDir() { - return nil - } - - names, err := readDirNames(fs, path) - if err != nil { - return walkFn(path, info, err) - } - - for _, name := range names { - filename := filepath.Join(path, name) - fileInfo, err := lstatIfPossible(fs, filename) - if err != nil { - if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { - return err - } - } else { - err = walk(fs, filename, fileInfo, walkFn) - if err != nil { - if !fileInfo.IsDir() || err != filepath.SkipDir { - return err - } - } - } - } - return nil -} - -// if the filesystem supports it, use Lstat, else use fs.Stat -func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { - if lfs, ok := fs.(Lstater); ok { - fi, _, err := lfs.LstatIfPossible(path) - return fi, err - } - return fs.Stat(path) -} - -// Walk walks the file tree rooted at root, calling walkFn for each file or -// directory in the tree, including root. All errors that arise visiting files -// and directories are filtered by walkFn. The files are walked in lexical -// order, which makes the output deterministic but means that for very -// large directories Walk can be inefficient. -// Walk does not follow symbolic links. 
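
[Context for the Walk helpers removed below: a minimal sketch of walking an afero tree, assuming the import path still resolves. Returning filepath.SkipDir from the callback would prune the directory being visited, matching the os package semantics described above.]

package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fsys := afero.NewMemMapFs()
	_ = fsys.MkdirAll("a/b", 0o755)
	_ = afero.WriteFile(fsys, "a/b/c.txt", []byte("hi"), 0o644)

	// The walk function sees every file and directory under the root,
	// in lexical order, including the root itself.
	err := afero.Walk(fsys, "a", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, info.IsDir())
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "walk:", err)
	}
}
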
- -func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { - return Walk(a.Fs, root, walkFn) -} - -func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { - info, err := lstatIfPossible(fs, root) - if err != nil { - return walkFn(root, nil, err) - } - return walk(fs, root, info, walkFn) -} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go deleted file mode 100644 index bd8f9264..00000000 --- a/vendor/github.com/spf13/afero/readonlyfs.go +++ /dev/null @@ -1,96 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -var _ Lstater = (*ReadOnlyFs)(nil) - -type ReadOnlyFs struct { - source Fs -} - -func NewReadOnlyFs(source Fs) Fs { - return &ReadOnlyFs{source: source} -} - -func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { - return ReadDir(r.source, name) -} - -func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Chown(n string, uid, gid int) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Name() string { - return "ReadOnlyFilter" -} - -func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { - return r.source.Stat(name) -} - -func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - if lsf, ok := r.source.(Lstater); ok { - return lsf.LstatIfPossible(name) - } - fi, err := r.Stat(name) - return fi, false, err -} - -func (r *ReadOnlyFs) SymlinkIfPossible(oldname, newname string) error { - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} -} - -func (r *ReadOnlyFs) ReadlinkIfPossible(name string) (string, error) { - if srdr, ok := r.source.(LinkReader); ok { - return srdr.ReadlinkIfPossible(name) - } - - return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} -} - -func (r *ReadOnlyFs) Rename(o, n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) RemoveAll(p string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Remove(n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - return nil, syscall.EPERM - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *ReadOnlyFs) Open(n string) (File, error) { - return r.source.Open(n) -} - -func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Create(n string) (File, error) { - return nil, syscall.EPERM -} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go deleted file mode 100644 index 218f3b23..00000000 --- a/vendor/github.com/spf13/afero/regexpfs.go +++ /dev/null @@ -1,223 +0,0 @@ -package afero - -import ( - "os" - "regexp" - "syscall" - "time" -) - -// The RegexpFs filters files (not directories) by regular expression. Only -// files matching the given regexp will be allowed, all others get a ENOENT error ( -// "No such file or directory"). 
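
[Context for the RegexpFs type removed below: only file names matching the regexp are visible, while directories pass through, as the doc comment above states. A small self-contained sketch, assuming the usual import path.]

package main

import (
	"fmt"
	"regexp"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	_ = afero.WriteFile(base, "keep.txt", []byte("yes"), 0o644)
	_ = afero.WriteFile(base, "skip.log", []byte("no"), 0o644)

	// Files whose names fail the pattern get ENOENT; directories are exempt.
	fsys := afero.NewRegexpFs(base, regexp.MustCompile(`\.txt$`))

	if _, err := fsys.Stat("keep.txt"); err == nil {
		fmt.Println("keep.txt visible")
	}
	if _, err := fsys.Stat("skip.log"); err != nil {
		fmt.Println("skip.log filtered:", err) // no such file or directory
	}
}
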
-type RegexpFs struct { - re *regexp.Regexp - source Fs -} - -func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { - return &RegexpFs{source: source, re: re} -} - -type RegexpFile struct { - f File - re *regexp.Regexp -} - -func (r *RegexpFs) matchesName(name string) error { - if r.re == nil { - return nil - } - if r.re.MatchString(name) { - return nil - } - return syscall.ENOENT -} - -func (r *RegexpFs) dirOrMatches(name string) error { - dir, err := IsDir(r.source, name) - if err != nil { - return err - } - if dir { - return nil - } - return r.matchesName(name) -} - -func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chtimes(name, a, m) -} - -func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chmod(name, mode) -} - -func (r *RegexpFs) Chown(name string, uid, gid int) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chown(name, uid, gid) -} - -func (r *RegexpFs) Name() string { - return "RegexpFs" -} - -func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.Stat(name) -} - -func (r *RegexpFs) Rename(oldname, newname string) error { - dir, err := IsDir(r.source, oldname) - if err != nil { - return err - } - if dir { - return nil - } - if err := r.matchesName(oldname); err != nil { - return err - } - if err := r.matchesName(newname); err != nil { - return err - } - return r.source.Rename(oldname, newname) -} - -func (r *RegexpFs) RemoveAll(p string) error { - dir, err := IsDir(r.source, p) - if err != nil { - return err - } - if !dir { - if err := r.matchesName(p); err != nil { - return err - } - } - return r.source.RemoveAll(p) -} - -func (r *RegexpFs) Remove(name string) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Remove(name) -} - -func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *RegexpFs) Open(name string) (File, error) { - dir, err := IsDir(r.source, name) - if err != nil { - return nil, err - } - if !dir { - if err := r.matchesName(name); err != nil { - return nil, err - } - } - f, err := r.source.Open(name) - if err != nil { - return nil, err - } - return &RegexpFile{f: f, re: r.re}, nil -} - -func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { - return r.source.Mkdir(n, p) -} - -func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { - return r.source.MkdirAll(n, p) -} - -func (r *RegexpFs) Create(name string) (File, error) { - if err := r.matchesName(name); err != nil { - return nil, err - } - return r.source.Create(name) -} - -func (f *RegexpFile) Close() error { - return f.f.Close() -} - -func (f *RegexpFile) Read(s []byte) (int, error) { - return f.f.Read(s) -} - -func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { - return f.f.ReadAt(s, o) -} - -func (f *RegexpFile) Seek(o int64, w int) (int64, error) { - return f.f.Seek(o, w) -} - -func (f *RegexpFile) Write(s []byte) (int, error) { - return f.f.Write(s) -} - -func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { - return f.f.WriteAt(s, o) -} - -func (f *RegexpFile) Name() string { - return f.f.Name() -} - -func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) { 
- var rfi []os.FileInfo - rfi, err = f.f.Readdir(c) - if err != nil { - return nil, err - } - for _, i := range rfi { - if i.IsDir() || f.re.MatchString(i.Name()) { - fi = append(fi, i) - } - } - return fi, nil -} - -func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { - fi, err := f.Readdir(c) - if err != nil { - return nil, err - } - for _, s := range fi { - n = append(n, s.Name()) - } - return n, nil -} - -func (f *RegexpFile) Stat() (os.FileInfo, error) { - return f.f.Stat() -} - -func (f *RegexpFile) Sync() error { - return f.f.Sync() -} - -func (f *RegexpFile) Truncate(s int64) error { - return f.f.Truncate(s) -} - -func (f *RegexpFile) WriteString(s string) (int, error) { - return f.f.WriteString(s) -} diff --git a/vendor/github.com/spf13/afero/symlink.go b/vendor/github.com/spf13/afero/symlink.go deleted file mode 100644 index aa6ae125..00000000 --- a/vendor/github.com/spf13/afero/symlink.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright © 2018 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "errors" -) - -// Symlinker is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It indicates support for 3 symlink related interfaces that implement the -// behaviors of the os methods: -// - Lstat -// - Symlink, and -// - Readlink -type Symlinker interface { - Lstater - Linker - LinkReader -} - -// Linker is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It will call Symlink if the filesystem itself is, or it delegates to, the os filesystem, -// or the filesystem otherwise supports Symlink's. -type Linker interface { - SymlinkIfPossible(oldname, newname string) error -} - -// ErrNoSymlink is the error that will be wrapped in an os.LinkError if a file system -// does not support Symlink's either directly or through its delegated filesystem. -// As expressed by support for the Linker interface. -var ErrNoSymlink = errors.New("symlink not supported") - -// LinkReader is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -type LinkReader interface { - ReadlinkIfPossible(name string) (string, error) -} - -// ErrNoReadlink is the error that will be wrapped in an os.Path if a file system -// does not support the readlink operation either directly or through its delegated filesystem. -// As expressed by support for the LinkReader interface. -var ErrNoReadlink = errors.New("readlink not supported") diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go deleted file mode 100644 index 62dd6c93..00000000 --- a/vendor/github.com/spf13/afero/unionFile.go +++ /dev/null @@ -1,330 +0,0 @@ -package afero - -import ( - "io" - "os" - "path/filepath" - "syscall" -) - -// The UnionFile implements the afero.File interface and will be returned -// when reading a directory present at least in the overlay or opening a file -// for writing. 
-// -// The calls to -// Readdir() and Readdirnames() merge the file os.FileInfo / names from the -// base and the overlay - for files present in both layers, only those -// from the overlay will be used. -// -// When opening files for writing (Create() / OpenFile() with the right flags) -// the operations will be done in both layers, starting with the overlay. A -// successful read in the overlay will move the cursor position in the base layer -// by the number of bytes read. -type UnionFile struct { - Base File - Layer File - Merger DirsMerger - off int - files []os.FileInfo -} - -func (f *UnionFile) Close() error { - // first close base, so we have a newer timestamp in the overlay. If we'd close - // the overlay first, we'd get a cacheStale the next time we access this file - // -> cache would be useless ;-) - if f.Base != nil { - f.Base.Close() - } - if f.Layer != nil { - return f.Layer.Close() - } - return BADFD -} - -func (f *UnionFile) Read(s []byte) (int, error) { - if f.Layer != nil { - n, err := f.Layer.Read(s) - if (err == nil || err == io.EOF) && f.Base != nil { - // advance the file position also in the base file, the next - // call may be a write at this position (or a seek with SEEK_CUR) - if _, seekErr := f.Base.Seek(int64(n), io.SeekCurrent); seekErr != nil { - // only overwrite err in case the seek fails: we need to - // report an eventual io.EOF to the caller - err = seekErr - } - } - return n, err - } - if f.Base != nil { - return f.Base.Read(s) - } - return 0, BADFD -} - -func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { - if f.Layer != nil { - n, err := f.Layer.ReadAt(s, o) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o+int64(n), io.SeekStart) - } - return n, err - } - if f.Base != nil { - return f.Base.ReadAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { - if f.Layer != nil { - pos, err = f.Layer.Seek(o, w) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o, w) - } - return pos, err - } - if f.Base != nil { - return f.Base.Seek(o, w) - } - return 0, BADFD -} - -func (f *UnionFile) Write(s []byte) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.Write(s) - if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? - _, err = f.Base.Write(s) - } - return n, err - } - if f.Base != nil { - return f.Base.Write(s) - } - return 0, BADFD -} - -func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteAt(s, o) - if err == nil && f.Base != nil { - _, err = f.Base.WriteAt(s, o) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Name() string { - if f.Layer != nil { - return f.Layer.Name() - } - return f.Base.Name() -} - -// DirsMerger is how UnionFile weaves two directories together. -// It takes the FileInfo slices from the layer and the base and returns a -// single view. 
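
[Context for the DirsMerger type removed below: it is the extension point for the directory-merge behavior described above. A hypothetical sketch of supplying a custom merger to a hand-built UnionFile; normally the copy-on-write filesystems construct UnionFile values for you.]

package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

func main() {
	base, layer := afero.NewMemMapFs(), afero.NewMemMapFs()
	_ = afero.WriteFile(base, "/base-only.txt", []byte("b"), 0o644)
	_ = afero.WriteFile(base, "/both.txt", []byte("base"), 0o644)
	_ = afero.WriteFile(layer, "/both.txt", []byte("layer"), 0o644)

	bf, _ := base.Open("/")
	lf, _ := layer.Open("/")

	// A custom merger that ignores the base entirely; leaving Merger nil
	// would fall back to the default overlay-wins union.
	layerOnly := func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
		return lofi, nil
	}

	u := &afero.UnionFile{Base: bf, Layer: lf, Merger: layerOnly}
	infos, err := u.Readdir(-1)
	if err != nil {
		panic(err)
	}
	for _, fi := range infos {
		fmt.Println(fi.Name()) // both.txt
	}
}
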
-type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) - -var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { - files := make(map[string]os.FileInfo) - - for _, fi := range lofi { - files[fi.Name()] = fi - } - - for _, fi := range bofi { - if _, exists := files[fi.Name()]; !exists { - files[fi.Name()] = fi - } - } - - rfi := make([]os.FileInfo, len(files)) - - i := 0 - for _, fi := range files { - rfi[i] = fi - i++ - } - - return rfi, nil -} - -// Readdir will weave the two directories together and -// return a single view of the overlayed directories. -// At the end of the directory view, the error is io.EOF if c > 0. -func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { - var merge DirsMerger = f.Merger - if merge == nil { - merge = defaultUnionMergeDirsFn - } - - if f.off == 0 { - var lfi []os.FileInfo - if f.Layer != nil { - lfi, err = f.Layer.Readdir(-1) - if err != nil { - return nil, err - } - } - - var bfi []os.FileInfo - if f.Base != nil { - bfi, err = f.Base.Readdir(-1) - if err != nil { - return nil, err - } - - } - merged, err := merge(lfi, bfi) - if err != nil { - return nil, err - } - f.files = append(f.files, merged...) - } - files := f.files[f.off:] - - if c <= 0 { - return files, nil - } - - if len(files) == 0 { - return nil, io.EOF - } - - if c > len(files) { - c = len(files) - } - - defer func() { f.off += c }() - return files[:c], nil -} - -func (f *UnionFile) Readdirnames(c int) ([]string, error) { - rfi, err := f.Readdir(c) - if err != nil { - return nil, err - } - var names []string - for _, fi := range rfi { - names = append(names, fi.Name()) - } - return names, nil -} - -func (f *UnionFile) Stat() (os.FileInfo, error) { - if f.Layer != nil { - return f.Layer.Stat() - } - if f.Base != nil { - return f.Base.Stat() - } - return nil, BADFD -} - -func (f *UnionFile) Sync() (err error) { - if f.Layer != nil { - err = f.Layer.Sync() - if err == nil && f.Base != nil { - err = f.Base.Sync() - } - return err - } - if f.Base != nil { - return f.Base.Sync() - } - return BADFD -} - -func (f *UnionFile) Truncate(s int64) (err error) { - if f.Layer != nil { - err = f.Layer.Truncate(s) - if err == nil && f.Base != nil { - err = f.Base.Truncate(s) - } - return err - } - if f.Base != nil { - return f.Base.Truncate(s) - } - return BADFD -} - -func (f *UnionFile) WriteString(s string) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteString(s) - if err == nil && f.Base != nil { - _, err = f.Base.WriteString(s) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteString(s) - } - return 0, BADFD -} - -func copyFile(base Fs, layer Fs, name string, bfh File) error { - // First make sure the directory exists - exists, err := Exists(layer, filepath.Dir(name)) - if err != nil { - return err - } - if !exists { - err = layer.MkdirAll(filepath.Dir(name), 0o777) // FIXME? 
- if err != nil { - return err - } - } - - // Create the file on the overlay - lfh, err := layer.Create(name) - if err != nil { - return err - } - n, err := io.Copy(lfh, bfh) - if err != nil { - // If anything fails, clean up the file - layer.Remove(name) - lfh.Close() - return err - } - - bfi, err := bfh.Stat() - if err != nil || bfi.Size() != n { - layer.Remove(name) - lfh.Close() - return syscall.EIO - } - - err = lfh.Close() - if err != nil { - layer.Remove(name) - lfh.Close() - return err - } - return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) -} - -func copyToLayer(base Fs, layer Fs, name string) error { - bfh, err := base.Open(name) - if err != nil { - return err - } - defer bfh.Close() - - return copyFile(base, layer, name, bfh) -} - -func copyFileToLayer(base Fs, layer Fs, name string, flag int, perm os.FileMode) error { - bfh, err := base.OpenFile(name, flag, perm) - if err != nil { - return err - } - defer bfh.Close() - - return copyFile(base, layer, name, bfh) -} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go deleted file mode 100644 index 9e4cba27..00000000 --- a/vendor/github.com/spf13/afero/util.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright ©2015 Steve Francia -// Portions Copyright ©2015 The Hugo Authors -// Portions Copyright 2016-present Bjørn Erik Pedersen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "unicode" - - "golang.org/x/text/runes" - "golang.org/x/text/transform" - "golang.org/x/text/unicode/norm" -) - -// Filepath separator defined by os.Separator. -const FilePathSeparator = string(filepath.Separator) - -// Takes a reader and a path and writes the content -func (a Afero) WriteReader(path string, r io.Reader) (err error) { - return WriteReader(a.Fs, path, r) -} - -func WriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0o777) // rwx, rw, r - if err != nil { - if err != os.ErrExist { - return err - } - } - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -// Same as WriteReader but checks to see if file/directory already exists. 
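// A minimal usage sketch for WriteReader above and SafeWriteReader below,
// assuming "strings" and "fmt" are imported alongside the in-memory backend:
// WriteReader silently overwrites, while SafeWriteReader errors out when the
// target already exists.
func exampleWriteReaders() {
	fs := NewMemMapFs()
	_ = WriteReader(fs, "/demo/out.txt", strings.NewReader("v1")) // creates /demo, writes file
	err := SafeWriteReader(fs, "/demo/out.txt", strings.NewReader("v2"))
	fmt.Println(err) // "/demo/out.txt already exists"
}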
-func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { - return SafeWriteReader(a.Fs, path, r) -} - -func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0o777) // rwx, rw, r - if err != nil { - return - } - } - - exists, err := Exists(fs, path) - if err != nil { - return - } - if exists { - return fmt.Errorf("%v already exists", path) - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -func (a Afero) GetTempDir(subPath string) string { - return GetTempDir(a.Fs, subPath) -} - -// GetTempDir returns the default temp directory with trailing slash -// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx -func GetTempDir(fs Fs, subPath string) string { - addSlash := func(p string) string { - if FilePathSeparator != p[len(p)-1:] { - p = p + FilePathSeparator - } - return p - } - dir := addSlash(os.TempDir()) - - if subPath != "" { - // preserve windows backslash :-( - if FilePathSeparator == "\\" { - subPath = strings.Replace(subPath, "\\", "____", -1) - } - dir = dir + UnicodeSanitize((subPath)) - if FilePathSeparator == "\\" { - dir = strings.Replace(dir, "____", "\\", -1) - } - - if exists, _ := Exists(fs, dir); exists { - return addSlash(dir) - } - - err := fs.MkdirAll(dir, 0o777) - if err != nil { - panic(err) - } - dir = addSlash(dir) - } - return dir -} - -// Rewrite string to remove non-standard path characters -func UnicodeSanitize(s string) string { - source := []rune(s) - target := make([]rune, 0, len(source)) - - for _, r := range source { - if unicode.IsLetter(r) || - unicode.IsDigit(r) || - unicode.IsMark(r) || - r == '.' || - r == '/' || - r == '\\' || - r == '_' || - r == '-' || - r == '%' || - r == ' ' || - r == '#' { - target = append(target, r) - } - } - - return string(target) -} - -// Transform characters with accents into plain forms. -func NeuterAccents(s string) string { - t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC) - result, _, _ := transform.String(t, string(s)) - - return result -} - -func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { - return FileContainsBytes(a.Fs, filename, subslice) -} - -// Check if a file contains a specified byte slice. -func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslice), nil -} - -func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { - return FileContainsAnyBytes(a.Fs, filename, subslices) -} - -// Check if a file contains any of the specified byte slices. -func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslices...), nil -} - -// readerContains reports whether any of the subslices is within r. 
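// readerContainsAny, defined just below, scans the reader with an overlapping
// sliding window sized to four times the longest needle, so matches that
// straddle a read boundary are still found. A minimal sketch of the public
// wrapper, assuming a MemMapFs and afero's WriteFile helper:
func exampleContains() (bool, error) {
	fs := NewMemMapFs()
	if err := WriteFile(fs, "/f.txt", []byte("needle in a haystack"), 0o644); err != nil {
		return false, err
	}
	return FileContainsBytes(fs, "/f.txt", []byte("needle")) // true, nil
}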
-func readerContainsAny(r io.Reader, subslices ...[]byte) bool { - if r == nil || len(subslices) == 0 { - return false - } - - largestSlice := 0 - - for _, sl := range subslices { - if len(sl) > largestSlice { - largestSlice = len(sl) - } - } - - if largestSlice == 0 { - return false - } - - bufflen := largestSlice * 4 - halflen := bufflen / 2 - buff := make([]byte, bufflen) - var err error - var n, i int - - for { - i++ - if i == 1 { - n, err = io.ReadAtLeast(r, buff[:halflen], halflen) - } else { - if i != 2 { - // shift left to catch overlapping matches - copy(buff[:], buff[halflen:]) - } - n, err = io.ReadAtLeast(r, buff[halflen:], halflen) - } - - if n > 0 { - for _, sl := range subslices { - if bytes.Contains(buff, sl) { - return true - } - } - } - - if err != nil { - break - } - } - return false -} - -func (a Afero) DirExists(path string) (bool, error) { - return DirExists(a.Fs, path) -} - -// DirExists checks if a path exists and is a directory. -func DirExists(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err == nil && fi.IsDir() { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func (a Afero) IsDir(path string) (bool, error) { - return IsDir(a.Fs, path) -} - -// IsDir checks if a given path is a directory. -func IsDir(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - return fi.IsDir(), nil -} - -func (a Afero) IsEmpty(path string) (bool, error) { - return IsEmpty(a.Fs, path) -} - -// IsEmpty checks if a given file or directory is empty. -func IsEmpty(fs Fs, path string) (bool, error) { - if b, _ := Exists(fs, path); !b { - return false, fmt.Errorf("%q path does not exist", path) - } - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - if fi.IsDir() { - f, err := fs.Open(path) - if err != nil { - return false, err - } - defer f.Close() - list, err := f.Readdir(-1) - if err != nil { - return false, err - } - return len(list) == 0, nil - } - return fi.Size() == 0, nil -} - -func (a Afero) Exists(path string) (bool, error) { - return Exists(a.Fs, path) -} - -// Check if a file or directory exists. -func Exists(fs Fs, path string) (bool, error) { - _, err := fs.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { - combinedPath := filepath.Join(basePathFs.path, relativePath) - if parent, ok := basePathFs.source.(*BasePathFs); ok { - return FullBaseFsPath(parent, combinedPath) - } - - return combinedPath -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/.travis.yml b/vendor/github.com/zclconf/go-cty-yaml/.travis.yml deleted file mode 100644 index 13ff9986..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go - -go: - - 1.12 - diff --git a/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md b/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md deleted file mode 100644 index b329bd05..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md +++ /dev/null @@ -1,16 +0,0 @@ -# 1.0.2 (June 17, 2020) - -* The YAML decoder now follows the YAML specification more closely when parsing - numeric values. - ([#6](https://github.com/zclconf/go-cty-yaml/pull/6)) - -# 1.0.1 (July 30, 2019) - -* The YAML decoder is now correctly treating quoted scalars as verbatim literal - strings rather than using the fuzzy type selection rules for them. 
Fuzzy - type selection rules still apply to unquoted scalars. - ([#4](https://github.com/zclconf/go-cty-yaml/pull/4)) - -# 1.0.0 (May 26, 2019) - -Initial release. diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE b/vendor/github.com/zclconf/go-cty-yaml/LICENSE deleted file mode 100644 index 8dada3ed..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml b/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml deleted file mode 100644 index 8da58fbf..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/zclconf/go-cty-yaml/NOTICE b/vendor/github.com/zclconf/go-cty-yaml/NOTICE deleted file mode 100644 index 4e6c00ab..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/NOTICE +++ /dev/null @@ -1,20 +0,0 @@ -This package is derived from gopkg.in/yaml.v2, which is copyright -2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -Includes mechanical ports of code from libyaml, distributed under its original -license. See LICENSE.libyaml for more information. - -Modifications for cty interfacing copyright 2019 Martin Atkins, and -distributed under the same license terms. 
diff --git a/vendor/github.com/zclconf/go-cty-yaml/apic.go b/vendor/github.com/zclconf/go-cty-yaml/apic.go deleted file mode 100644 index 1f7e87e6..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/apic.go +++ /dev/null @@ -1,739 +0,0 @@ -package yaml - -import ( - "io" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// Reader read handler. -func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_reader.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_reader_read_handler - parser.input_reader = r -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -// Set a string output. 
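// A minimal sketch of the symmetric setup plumbing in this file: a parser fed
// from a byte slice, and an emitter writing into a caller-owned buffer via
// yaml_emitter_set_output_string, defined just below. Only the unexported
// yaml_* helpers shown in this file are used.
func exampleSetup(src []byte, out *[]byte) (*yaml_parser_t, *yaml_emitter_t) {
	p := &yaml_parser_t{}
	yaml_parser_initialize(p)            // allocates the raw and decoded input buffers
	yaml_parser_set_input_string(p, src) // installs yaml_string_read_handler

	e := &yaml_emitter_t{}
	yaml_emitter_initialize(e)
	yaml_emitter_set_output_string(e, out) // installs yaml_string_write_handler
	return p, e
}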
-func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. -func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. -// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize( - event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool, -) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. 
-// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. -// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. 
-// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. 
-// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/github.com/zclconf/go-cty-yaml/converter.go b/vendor/github.com/zclconf/go-cty-yaml/converter.go deleted file mode 100644 index a73b34a8..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/converter.go +++ /dev/null @@ -1,69 +0,0 @@ -package yaml - -import ( - "github.com/zclconf/go-cty/cty" -) - -// ConverterConfig is used to configure a new converter, using NewConverter. -type ConverterConfig struct { - // EncodeAsFlow, when set to true, causes Marshal to produce flow-style - // mapping and sequence serializations. - EncodeAsFlow bool -} - -// A Converter can marshal and unmarshal between cty values and YAML bytes. -// -// Because there are many different ways to map cty to YAML and vice-versa, -// a converter is configurable using the settings in ConverterConfig, which -// allow for a few different permutations of mapping to YAML. -// -// If you are just trying to work with generic, standard YAML, the predefined -// converter in Standard should be good enough. -type Converter struct { - encodeAsFlow bool -} - -// NewConverter creates a new Converter with the given configuration. -func NewConverter(config *ConverterConfig) *Converter { - return &Converter{ - encodeAsFlow: config.EncodeAsFlow, - } -} - -// Standard is a predefined Converter that produces and consumes generic YAML -// using only built-in constructs that any other YAML implementation ought to -// understand. -var Standard *Converter = NewConverter(&ConverterConfig{}) - -// ImpliedType analyzes the given source code and returns a suitable type that -// it could be decoded into. -// -// For a converter that is using standard YAML rather than cty-specific custom -// tags, only a subset of cty types can be produced: strings, numbers, bools, -// tuple types, and object types. -func (c *Converter) ImpliedType(src []byte) (cty.Type, error) { - return c.impliedType(src) -} - -// Marshal serializes the given value into a YAML document, using a fixed -// mapping from cty types to YAML constructs. -// -// Note that unlike the function of the same name in the cty JSON package, -// this does not take a type constraint and therefore the YAML serialization -// cannot preserve late-bound type information in the serialization to be -// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type -// constraint given to Unmarshal will be decoded as if the corresponding portion -// of the input were processed with ImpliedType to find a target type. -func (c *Converter) Marshal(v cty.Value) ([]byte, error) { - return c.marshal(v) -} - -// Unmarshal reads the document found within the given source buffer -// and attempts to convert it into a value conforming to the given type -// constraint. -// -// An error is returned if the given source contains any YAML document -// delimiters. 
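// A minimal round-trip sketch for the Standard converter above, pairing
// ImpliedType with Unmarshal as the doc comments describe: the type implied
// by the source itself is used as the decoding constraint.
func exampleRoundTrip(src []byte) (cty.Value, error) {
	ty, err := Standard.ImpliedType(src) // e.g. an object type for a YAML mapping
	if err != nil {
		return cty.NilVal, err
	}
	return Standard.Unmarshal(src, ty)
}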
-func (c *Converter) Unmarshal(src []byte, ty cty.Type) (cty.Value, error) { - return c.unmarshal(src, ty) -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go b/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go deleted file mode 100644 index b91141cc..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go +++ /dev/null @@ -1,57 +0,0 @@ -package yaml - -import ( - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" -) - -// YAMLDecodeFunc is a cty function for decoding arbitrary YAML source code -// into a cty Value, using the ImpliedType and Unmarshal methods of the -// Standard pre-defined converter. -var YAMLDecodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "src", - Type: cty.String, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - if !args[0].IsKnown() { - return cty.DynamicPseudoType, nil - } - if args[0].IsNull() { - return cty.NilType, function.NewArgErrorf(0, "YAML source code cannot be null") - } - return Standard.ImpliedType([]byte(args[0].AsString())) - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - if retType == cty.DynamicPseudoType { - return cty.DynamicVal, nil - } - return Standard.Unmarshal([]byte(args[0].AsString()), retType) - }, -}) - -// YAMLEncodeFunc is a cty function for encoding an arbitrary cty value -// into YAML. -var YAMLEncodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "value", - Type: cty.DynamicPseudoType, - AllowNull: true, - AllowDynamicType: true, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - if !args[0].IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - raw, err := Standard.Marshal(args[0]) - if err != nil { - return cty.NilVal, err - } - return cty.StringVal(string(raw)), nil - }, -}) diff --git a/vendor/github.com/zclconf/go-cty-yaml/decode.go b/vendor/github.com/zclconf/go-cty-yaml/decode.go deleted file mode 100644 index e369ff27..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/decode.go +++ /dev/null @@ -1,261 +0,0 @@ -package yaml - -import ( - "errors" - "fmt" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -func (c *Converter) unmarshal(src []byte, ty cty.Type) (cty.Value, error) { - p := &yaml_parser_t{} - if !yaml_parser_initialize(p) { - return cty.NilVal, errors.New("failed to initialize YAML parser") - } - if len(src) == 0 { - src = []byte{'\n'} - } - - an := &valueAnalysis{ - anchorsPending: map[string]int{}, - anchorVals: map[string]cty.Value{}, - } - - yaml_parser_set_input_string(p, src) - - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ != yaml_STREAM_START_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "missing stream start token") - } - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ != yaml_DOCUMENT_START_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "missing start of document") - } - - v, err := c.unmarshalParse(an, p) - if err != nil { - return cty.NilVal, err - } - - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ == yaml_DOCUMENT_START_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "only a single document is allowed") - } - if evt.typ != yaml_DOCUMENT_END_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String()) - } - if 
!yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ != yaml_STREAM_END_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content after value") - } - - return convert.Convert(v, ty) -} - -func (c *Converter) unmarshalParse(an *valueAnalysis, p *yaml_parser_t) (cty.Value, error) { - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - return c.unmarshalParseRemainder(an, &evt, p) -} - -func (c *Converter) unmarshalParseRemainder(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - switch evt.typ { - case yaml_SCALAR_EVENT: - return c.unmarshalScalar(an, evt, p) - case yaml_ALIAS_EVENT: - return c.unmarshalAlias(an, evt, p) - case yaml_MAPPING_START_EVENT: - return c.unmarshalMapping(an, evt, p) - case yaml_SEQUENCE_START_EVENT: - return c.unmarshalSequence(an, evt, p) - case yaml_DOCUMENT_START_EVENT: - return cty.NilVal, parseEventErrorf(evt, "only a single document is allowed") - case yaml_STREAM_END_EVENT: - // Decoding an empty buffer, probably - return cty.NilVal, parseEventErrorf(evt, "expecting value but found end of stream") - default: - // Should never happen; the above should be comprehensive - return cty.NilVal, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String()) - } -} - -func (c *Converter) unmarshalScalar(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - src := evt.value - tag := string(evt.tag) - anchor := string(evt.anchor) - - if len(anchor) > 0 { - an.beginAnchor(anchor) - } - - val, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style)) - if err != nil { - return cty.NilVal, parseEventErrorWrap(evt, err) - } - - if val.RawEquals(mergeMappingVal) { - // In any context other than a mapping key, this is just a plain string - val = cty.StringVal("<<") - } - - if len(anchor) > 0 { - an.completeAnchor(anchor, val) - } - return val, nil -} - -func (c *Converter) unmarshalMapping(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_MAP_TAG { - return cty.NilVal, parseEventErrorf(evt, "can't interpret mapping as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - vals := make(map[string]cty.Value) - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilVal, parserError(p) - } - if nextEvt.typ == yaml_MAPPING_END_EVENT { - v := cty.ObjectVal(vals) - if anchor != "" { - an.completeAnchor(anchor, v) - } - return v, nil - } - - if nextEvt.typ != yaml_SCALAR_EVENT { - return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style)) - if err != nil { - return cty.NilVal, err - } - if keyVal.RawEquals(mergeMappingVal) { - // Merging the value (which must be a mapping) into our mapping, - // then. 
- val, err := c.unmarshalParse(an, p) - if err != nil { - return cty.NilVal, err - } - ty := val.Type() - if !(ty.IsObjectType() || ty.IsMapType()) { - return cty.NilVal, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName()) - } - for it := val.ElementIterator(); it.Next(); { - k, v := it.Element() - vals[k.AsString()] = v - } - continue - } - if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil { - keyVal = keyValStr - } else { - return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - if keyVal.IsNull() { - return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key cannot be null") - } - if !keyVal.IsKnown() { - return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key must be known") - } - val, err := c.unmarshalParse(an, p) - if err != nil { - return cty.NilVal, err - } - - vals[keyVal.AsString()] = val - } -} - -func (c *Converter) unmarshalSequence(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_SEQ_TAG { - return cty.NilVal, parseEventErrorf(evt, "can't interpret sequence as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - var vals []cty.Value - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilVal, parserError(p) - } - if nextEvt.typ == yaml_SEQUENCE_END_EVENT { - ty := cty.TupleVal(vals) - if anchor != "" { - an.completeAnchor(anchor, ty) - } - return ty, nil - } - - val, err := c.unmarshalParseRemainder(an, &nextEvt, p) - if err != nil { - return cty.NilVal, err - } - - vals = append(vals, val) - } -} - -func (c *Converter) unmarshalAlias(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - v, err := an.anchorVal(string(evt.anchor)) - if err != nil { - err = parseEventErrorWrap(evt, err) - } - return v, err -} - -type valueAnalysis struct { - anchorsPending map[string]int - anchorVals map[string]cty.Value -} - -func (an *valueAnalysis) beginAnchor(name string) { - an.anchorsPending[name]++ -} - -func (an *valueAnalysis) completeAnchor(name string, v cty.Value) { - an.anchorsPending[name]-- - if an.anchorsPending[name] == 0 { - delete(an.anchorsPending, name) - } - an.anchorVals[name] = v -} - -func (an *valueAnalysis) anchorVal(name string) (cty.Value, error) { - if _, pending := an.anchorsPending[name]; pending { - // YAML normally allows self-referencing structures, but cty cannot - // represent them (it requires all structures to be finite) so we - // must fail here. - return cty.NilVal, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name) - } - ty, ok := an.anchorVals[name] - if !ok { - return cty.NilVal, fmt.Errorf("reference to undefined anchor %q", name) - } - return ty, nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/emitterc.go b/vendor/github.com/zclconf/go-cty-yaml/emitterc.go deleted file mode 100644 index a1c2cc52..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. 
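// Before the byte-level put helper defined just below, a minimal sketch of
// driving the emitter's output buffering with the unexported plumbing above;
// yaml_emitter_flush is defined elsewhere in this package.
func examplePut(out *[]byte) bool {
	e := &yaml_emitter_t{}
	yaml_emitter_initialize(e)
	yaml_emitter_set_output_string(e, out)
	return put(e, 'x') && yaml_emitter_flush(e) // out now holds "x"
}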
-func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. -func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. 
-// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. 
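The dispatcher below is a classic explicit state machine: every state handles exactly one kind of event, and composite states push their continuation onto emitter.states so that child nodes return to the right context when they finish. A compact sketch of the same push-down shape, under invented names (not the library's types):

    package main

    import "fmt"

    type state int

    const (
        stateStreamStart state = iota
        stateDocStart
        stateEnd
    )

    type machine struct {
        state  state
        states []state // continuation stack, like emitter.states
    }

    func (m *machine) emit(event string) error {
        switch m.state {
        case stateStreamStart:
            if event != "STREAM-START" {
                return fmt.Errorf("expected STREAM-START, got %s", event)
            }
            m.state = stateDocStart
        case stateDocStart:
            if event == "STREAM-END" {
                m.state = stateEnd
                return nil
            }
            // A real DOCUMENT-START would push the state to resume in
            // once the document's root node has been emitted.
            m.states = append(m.states, stateDocStart)
        case stateEnd:
            return fmt.Errorf("expected nothing after STREAM-END")
        }
        return nil
    }

    func main() {
        m := &machine{}
        fmt.Println(m.emit("STREAM-START")) // <nil>
        fmt.Println(m.emit("STREAM-END"))   // <nil>
        fmt.Println(m.emit("STREAM-END"))   // error
    }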
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. 
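The next handler decides how much document prologue to write. When directives are present, or the document cannot be left implicit, the framing written here looks roughly like this (names and values invented for illustration):

    %YAML 1.1
    %TAG !e! tag:example.com,2019:
    --- !e!thing
    name: value
    ...

An implicit document with no directives omits all of that and starts directly at the root node; the `...` terminator is only written when a previous document was left open-ended or the DOCUMENT-END event is explicit.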
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. 
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a flow key node. -func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. 
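The flow states above produce the inline [a, b] and {k: v} forms, while the block states further below produce the indented layout; the choice is ultimately driven by the converter's flow setting and the per-event style. Assuming the package's exported NewConverter/ConverterConfig/Standard surface, which is defined outside this hunk, a usage sketch:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
        yaml "github.com/zclconf/go-cty-yaml"
    )

    func main() {
        v := cty.ObjectVal(map[string]cty.Value{
            "name":  cty.StringVal("example"),
            "ports": cty.TupleVal([]cty.Value{cty.NumberIntVal(80), cty.NumberIntVal(443)}),
        })

        // Standard emits block style; EncodeAsFlow requests the inline
        // forms handled by the flow states in this file. Errors elided
        // for brevity in this sketch.
        block, _ := yaml.Standard.Marshal(v)
        flow, _ := yaml.NewConverter(&yaml.ConverterConfig{EncodeAsFlow: true}).Marshal(v)
        fmt.Println(string(block))
        fmt.Println(string(flow))
    }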
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a block key node. -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a node. 
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) - } -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. 
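Both of the "check empty" helpers rely on the event queue built up by yaml_emitter_emit: because START events are held back until enough lookahead has accumulated, an empty collection is recognizable as a START event immediately followed by its matching END. Distilled into a standalone form (illustrative, not the library's API):

    package main

    import "fmt"

    // isEmptyCollection reports whether the queued events starting at
    // head describe an empty collection: a START immediately followed
    // by its matching END.
    func isEmptyCollection(events []string, head int, start, end string) bool {
        return len(events)-head >= 2 &&
            events[head] == start &&
            events[head+1] == end
    }

    func main() {
        q := []string{"MAPPING-START", "MAPPING-END"}
        fmt.Println(isEmptyCollection(q, 0, "MAPPING-START", "MAPPING-END")) // true
    }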
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. 
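For orientation, the anchor and tag writers in this area produce the following surface forms; the handle/suffix split comes from matching a %TAG (or default) prefix in yaml_emitter_analyze_tag below, and a tag with no matching handle falls back to the verbatim !<...> form. Sample document invented for illustration:

    defaults: &common {retries: 3}            # '&' introduces an anchor...
    service: *common                          # ...'*' an alias to it
    short: !!str 123                          # handle '!!' plus suffix 'str'
    verbatim: !<tag:example.com,2019:thing> {}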
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. -func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. 
-func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. 
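Condensing the flag assignments above, a few representative inputs and the scalar styles they leave available (abridged; the full analysis also tracks indicator characters, unprintable characters, and document markers):

    "word"      every style allowed
    " padded"   leading space rules out both plain styles
    "a\nb"      line breaks rule out plain; single quotes and literal/folded remain
    "a \nb"     a space immediately before a break leaves only double quotes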
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } 
else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, 
rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/encode.go b/vendor/github.com/zclconf/go-cty-yaml/encode.go deleted file mode 100644 index daa1478a..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/encode.go +++ /dev/null @@ -1,189 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" - "strings" - - "github.com/zclconf/go-cty/cty" -) - -func (c *Converter) marshal(v cty.Value) ([]byte, error) { - var buf bytes.Buffer - - e := &yaml_emitter_t{} - yaml_emitter_initialize(e) - yaml_emitter_set_output_writer(e, &buf) - yaml_emitter_set_unicode(e, true) - - var evt yaml_event_t - yaml_stream_start_event_initialize(&evt, yaml_UTF8_ENCODING) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - yaml_document_start_event_initialize(&evt, nil, nil, true) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - - if err := c.marshalEmit(v, e); err != nil { - return nil, err - } - - yaml_document_end_event_initialize(&evt, true) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - yaml_stream_end_event_initialize(&evt) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - - return buf.Bytes(), nil -} - -func (c *Converter) marshalEmit(v cty.Value, e *yaml_emitter_t) error { - ty := v.Type() - switch { - case v.IsNull(): - return c.marshalPrimitive(v, e) - case !v.IsKnown(): - return fmt.Errorf("cannot serialize unknown value as YAML") - case ty.IsPrimitiveType(): - return c.marshalPrimitive(v, e) - case ty.IsTupleType(), ty.IsListType(), ty.IsSetType(): - return c.marshalSequence(v, e) - case ty.IsObjectType(), ty.IsMapType(): - return c.marshalMapping(v, e) - default: - return fmt.Errorf("can't marshal %s as YAML", ty.FriendlyName()) - } -} - -func (c *Converter) marshalPrimitive(v cty.Value, e *yaml_emitter_t) error { - var evt yaml_event_t - - if v.IsNull() { - yaml_scalar_event_initialize( - &evt, - nil, - nil, - []byte("null"), - true, - true, - yaml_PLAIN_SCALAR_STYLE, - ) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil - } - - switch v.Type() { - case cty.String: - str := v.AsString() - style := yaml_DOUBLE_QUOTED_SCALAR_STYLE - if strings.Contains(str, "\n") { - style = yaml_LITERAL_SCALAR_STYLE - } - yaml_scalar_event_initialize( - &evt, - nil, - nil, - []byte(str), - true, - true, - style, - ) - case cty.Number: - str := v.AsBigFloat().Text('f', -1) - yaml_scalar_event_initialize( 
- &evt, - nil, - nil, - []byte(str), - true, - true, - yaml_PLAIN_SCALAR_STYLE, - ) - case cty.Bool: - var str string - switch v { - case cty.True: - str = "true" - case cty.False: - str = "false" - } - yaml_scalar_event_initialize( - &evt, - nil, - nil, - []byte(str), - true, - true, - yaml_PLAIN_SCALAR_STYLE, - ) - } - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil -} - -func (c *Converter) marshalSequence(v cty.Value, e *yaml_emitter_t) error { - style := yaml_BLOCK_SEQUENCE_STYLE - if c.encodeAsFlow { - style = yaml_FLOW_SEQUENCE_STYLE - } - - var evt yaml_event_t - yaml_sequence_start_event_initialize(&evt, nil, nil, true, style) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - - for it := v.ElementIterator(); it.Next(); { - _, v := it.Element() - err := c.marshalEmit(v, e) - if err != nil { - return err - } - } - - yaml_sequence_end_event_initialize(&evt) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil -} - -func (c *Converter) marshalMapping(v cty.Value, e *yaml_emitter_t) error { - style := yaml_BLOCK_MAPPING_STYLE - if c.encodeAsFlow { - style = yaml_FLOW_MAPPING_STYLE - } - - var evt yaml_event_t - yaml_mapping_start_event_initialize(&evt, nil, nil, true, style) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - - for it := v.ElementIterator(); it.Next(); { - k, v := it.Element() - err := c.marshalEmit(k, e) - if err != nil { - return err - } - err = c.marshalEmit(v, e) - if err != nil { - return err - } - } - - yaml_mapping_end_event_initialize(&evt) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/error.go b/vendor/github.com/zclconf/go-cty-yaml/error.go deleted file mode 100644 index ae41c488..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/error.go +++ /dev/null @@ -1,97 +0,0 @@ -package yaml - -import ( - "errors" - "fmt" -) - -// Error is an error implementation used to report errors that correspond to -// a particular position in an input buffer. -type Error struct { - cause error - Line, Column int -} - -func (e Error) Error() string { - return fmt.Sprintf("on line %d, column %d: %s", e.Line, e.Column, e.cause.Error()) -} - -// Cause is an implementation of the interface used by -// github.com/pkg/errors.Cause, returning the underlying error without the -// position information. -func (e Error) Cause() error { - return e.cause -} - -// WrappedErrors is an implementation of github.com/hashicorp/errwrap.Wrapper -// returning the underlying error without the position information. 
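Error deliberately exposes the position while hiding the cause behind both the github.com/pkg/errors Cause convention and errwrap's WrappedErrors, so callers can unwrap with whichever mechanism they already use. A hedged sketch of recovering the position through the package's exported API (the ImpliedType wrapper is assumed; it is defined outside this hunk):

    package main

    import (
        "fmt"

        yaml "github.com/zclconf/go-cty-yaml"
    )

    func main() {
        // An unclosed flow mapping is a syntax error; depending on where
        // the parser fails, the error may or may not carry a position.
        _, err := yaml.Standard.ImpliedType([]byte("{invalid"))
        if yamlErr, ok := err.(yaml.Error); ok {
            fmt.Printf("line %d, column %d: %s\n", yamlErr.Line, yamlErr.Column, yamlErr.Cause())
        } else if err != nil {
            fmt.Println(err) // position-free failure
        }
    }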
-func (e Error) WrappedErrors() []error { - return []error{e.cause} -} - -func parserError(p *yaml_parser_t) error { - var cause error - if len(p.problem) > 0 { - cause = errors.New(p.problem) - } else { - cause = errors.New("invalid YAML syntax") // useless generic error, then - } - - return parserErrorWrap(p, cause) -} - -func parserErrorWrap(p *yaml_parser_t, cause error) error { - switch { - case p.problem_mark.line != 0: - line := p.problem_mark.line - column := p.problem_mark.column - // Scanner errors don't iterate line before returning error - if p.error == yaml_SCANNER_ERROR { - line++ - column = 0 - } - return Error{ - cause: cause, - Line: line, - Column: column + 1, - } - case p.context_mark.line != 0: - return Error{ - cause: cause, - Line: p.context_mark.line, - Column: p.context_mark.column + 1, - } - default: - return cause - } -} - -func parserErrorf(p *yaml_parser_t, f string, vals ...interface{}) error { - return parserErrorWrap(p, fmt.Errorf(f, vals...)) -} - -func parseEventErrorWrap(evt *yaml_event_t, cause error) error { - if evt.start_mark.line == 0 { - // Event does not have a start mark, so we won't wrap the error at all - return cause - } - return Error{ - cause: cause, - Line: evt.start_mark.line, - Column: evt.start_mark.column + 1, - } -} - -func parseEventErrorf(evt *yaml_event_t, f string, vals ...interface{}) error { - return parseEventErrorWrap(evt, fmt.Errorf(f, vals...)) -} - -func emitterError(e *yaml_emitter_t) error { - var cause error - if len(e.problem) > 0 { - cause = errors.New(e.problem) - } else { - cause = errors.New("failed to write YAML token") // useless generic error, then - } - return cause -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/implied_type.go b/vendor/github.com/zclconf/go-cty-yaml/implied_type.go deleted file mode 100644 index 5b7b0686..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/implied_type.go +++ /dev/null @@ -1,268 +0,0 @@ -package yaml - -import ( - "errors" - "fmt" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -func (c *Converter) impliedType(src []byte) (cty.Type, error) { - p := &yaml_parser_t{} - if !yaml_parser_initialize(p) { - return cty.NilType, errors.New("failed to initialize YAML parser") - } - if len(src) == 0 { - src = []byte{'\n'} - } - - an := &typeAnalysis{ - anchorsPending: map[string]int{}, - anchorTypes: map[string]cty.Type{}, - } - - yaml_parser_set_input_string(p, src) - - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ != yaml_STREAM_START_EVENT { - return cty.NilType, parseEventErrorf(&evt, "missing stream start token") - } - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ != yaml_DOCUMENT_START_EVENT { - return cty.NilType, parseEventErrorf(&evt, "missing start of document") - } - - ty, err := c.impliedTypeParse(an, p) - if err != nil { - return cty.NilType, err - } - - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ == yaml_DOCUMENT_START_EVENT { - return cty.NilType, parseEventErrorf(&evt, "only a single document is allowed") - } - if evt.typ != yaml_DOCUMENT_END_EVENT { - return cty.NilType, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String()) - } - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ != yaml_STREAM_END_EVENT { - return cty.NilType, parseEventErrorf(&evt, "unexpected extra content after value") - } - - return ty, err -} - 
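Together with the unmarshal path, this gives the package's two-pass decoding flow: derive a cty type from the document's shape first, then decode against that type. A usage sketch, assuming the exported Converter methods defined outside this hunk:

    package main

    import (
        "fmt"

        yaml "github.com/zclconf/go-cty-yaml"
    )

    func main() {
        src := []byte("name: example\nports:\n  - 80\n  - 443\n")

        // Pass 1: mappings become object types, sequences become tuples.
        ty, err := yaml.Standard.ImpliedType(src)
        if err != nil {
            panic(err)
        }
        fmt.Println(ty.FriendlyName())

        // Pass 2: decode against the derived type.
        v, err := yaml.Standard.Unmarshal(src, ty)
        if err != nil {
            panic(err)
        }
        fmt.Println(v.GetAttr("name").AsString()) // example
    }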
-func (c *Converter) impliedTypeParse(an *typeAnalysis, p *yaml_parser_t) (cty.Type, error) { - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - return c.impliedTypeParseRemainder(an, &evt, p) -} - -func (c *Converter) impliedTypeParseRemainder(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - switch evt.typ { - case yaml_SCALAR_EVENT: - return c.impliedTypeScalar(an, evt, p) - case yaml_ALIAS_EVENT: - return c.impliedTypeAlias(an, evt, p) - case yaml_MAPPING_START_EVENT: - return c.impliedTypeMapping(an, evt, p) - case yaml_SEQUENCE_START_EVENT: - return c.impliedTypeSequence(an, evt, p) - case yaml_DOCUMENT_START_EVENT: - return cty.NilType, parseEventErrorf(evt, "only a single document is allowed") - case yaml_STREAM_END_EVENT: - // Decoding an empty buffer, probably - return cty.NilType, parseEventErrorf(evt, "expecting value but found end of stream") - default: - // Should never happen; the above should be comprehensive - return cty.NilType, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String()) - } -} - -func (c *Converter) impliedTypeScalar(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - src := evt.value - tag := string(evt.tag) - anchor := string(evt.anchor) - implicit := evt.implicit - - if len(anchor) > 0 { - an.beginAnchor(anchor) - } - - var ty cty.Type - switch { - case tag == "" && !implicit: - // Untagged explicit string - ty = cty.String - default: - v, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style)) - if err != nil { - return cty.NilType, parseEventErrorWrap(evt, err) - } - if v.RawEquals(mergeMappingVal) { - // In any context other than a mapping key, this is just a plain string - ty = cty.String - } else { - ty = v.Type() - } - } - - if len(anchor) > 0 { - an.completeAnchor(anchor, ty) - } - return ty, nil -} - -func (c *Converter) impliedTypeMapping(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_MAP_TAG { - return cty.NilType, parseEventErrorf(evt, "can't interpret mapping as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - atys := make(map[string]cty.Type) - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilType, parserError(p) - } - if nextEvt.typ == yaml_MAPPING_END_EVENT { - ty := cty.Object(atys) - if anchor != "" { - an.completeAnchor(anchor, ty) - } - return ty, nil - } - - if nextEvt.typ != yaml_SCALAR_EVENT { - return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style)) - if err != nil { - return cty.NilType, err - } - if keyVal.RawEquals(mergeMappingVal) { - // Merging the value (which must be a mapping) into our mapping, - // then. 
- ty, err := c.impliedTypeParse(an, p) - if err != nil { - return cty.NilType, err - } - if !ty.IsObjectType() { - return cty.NilType, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName()) - } - for name, aty := range ty.AttributeTypes() { - atys[name] = aty - } - continue - } - if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil { - keyVal = keyValStr - } else { - return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - if keyVal.IsNull() { - return cty.NilType, parseEventErrorf(&nextEvt, "mapping key cannot be null") - } - if !keyVal.IsKnown() { - return cty.NilType, parseEventErrorf(&nextEvt, "mapping key must be known") - } - valTy, err := c.impliedTypeParse(an, p) - if err != nil { - return cty.NilType, err - } - - atys[keyVal.AsString()] = valTy - } -} - -func (c *Converter) impliedTypeSequence(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_SEQ_TAG { - return cty.NilType, parseEventErrorf(evt, "can't interpret sequence as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - var atys []cty.Type - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilType, parserError(p) - } - if nextEvt.typ == yaml_SEQUENCE_END_EVENT { - ty := cty.Tuple(atys) - if anchor != "" { - an.completeAnchor(anchor, ty) - } - return ty, nil - } - - valTy, err := c.impliedTypeParseRemainder(an, &nextEvt, p) - if err != nil { - return cty.NilType, err - } - - atys = append(atys, valTy) - } -} - -func (c *Converter) impliedTypeAlias(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - ty, err := an.anchorType(string(evt.anchor)) - if err != nil { - err = parseEventErrorWrap(evt, err) - } - return ty, err -} - -type typeAnalysis struct { - anchorsPending map[string]int - anchorTypes map[string]cty.Type -} - -func (an *typeAnalysis) beginAnchor(name string) { - an.anchorsPending[name]++ -} - -func (an *typeAnalysis) completeAnchor(name string, ty cty.Type) { - an.anchorsPending[name]-- - if an.anchorsPending[name] == 0 { - delete(an.anchorsPending, name) - } - an.anchorTypes[name] = ty -} - -func (an *typeAnalysis) anchorType(name string) (cty.Type, error) { - if _, pending := an.anchorsPending[name]; pending { - // YAML normally allows self-referencing structures, but cty cannot - // represent them (it requires all structures to be finite) so we - // must fail here. - return cty.NilType, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name) - } - ty, ok := an.anchorTypes[name] - if !ok { - return cty.NilType, fmt.Errorf("reference to undefined anchor %q", name) - } - return ty, nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/parserc.go b/vendor/github.com/zclconf/go-cty-yaml/parserc.go deleted file mode 100644 index 81d05dfe..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/parserc.go +++ /dev/null @@ -1,1095 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? 
-// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. 
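-// (Editorial sketch, not upstream text.) A hypothetical event loop that
-// drives this dispatcher via yaml_parser_parse:
-//
-//	var event yaml_event_t
-//	for yaml_parser_parse(&parser, &event) {
-//		if event.typ == yaml_STREAM_END_EVENT {
-//			break
-//		}
-//		// ... handle event ...
-//	}
-//	// a false return (or parser.error != yaml_NO_ERROR) signals a
-//	// scanner or parser error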
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? 
-//                                               explicit_document* STREAM-END
-//              ************
-func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-	if token.typ != yaml_STREAM_START_TOKEN {
-		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
-	}
-	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
-	*event = yaml_event_t{
-		typ:        yaml_STREAM_START_EVENT,
-		start_mark: token.start_mark,
-		end_mark:   token.end_mark,
-		encoding:   token.encoding,
-	}
-	skip_token(parser)
-	return true
-}
-
-// Parse the productions:
-// implicit_document    ::= block_node DOCUMENT-END*
-//                          *
-// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-//                          *************************
-func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
-
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-
-	// Parse extra document end indicators.
-	if !implicit {
-		for token.typ == yaml_DOCUMENT_END_TOKEN {
-			skip_token(parser)
-			token = peek_token(parser)
-			if token == nil {
-				return false
-			}
-		}
-	}
-
-	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
-		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
-		token.typ != yaml_DOCUMENT_START_TOKEN &&
-		token.typ != yaml_STREAM_END_TOKEN {
-		// Parse an implicit document.
-		if !yaml_parser_process_directives(parser, nil, nil) {
-			return false
-		}
-		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
-		parser.state = yaml_PARSE_BLOCK_NODE_STATE
-
-		*event = yaml_event_t{
-			typ:        yaml_DOCUMENT_START_EVENT,
-			start_mark: token.start_mark,
-			end_mark:   token.end_mark,
-		}
-
-	} else if token.typ != yaml_STREAM_END_TOKEN {
-		// Parse an explicit document.
-		var version_directive *yaml_version_directive_t
-		var tag_directives []yaml_tag_directive_t
-		start_mark := token.start_mark
-		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
-			return false
-		}
-		token = peek_token(parser)
-		if token == nil {
-			return false
-		}
-		if token.typ != yaml_DOCUMENT_START_TOKEN {
-			yaml_parser_set_parser_error(parser,
-				"did not find expected <document start>", token.start_mark)
-			return false
-		}
-		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
-		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
-		end_mark := token.end_mark
-
-		*event = yaml_event_t{
-			typ:               yaml_DOCUMENT_START_EVENT,
-			start_mark:        start_mark,
-			end_mark:          end_mark,
-			version_directive: version_directive,
-			tag_directives:    tag_directives,
-			implicit:          false,
-		}
-		skip_token(parser)
-
-	} else {
-		// Parse the stream end.
-		parser.state = yaml_PARSE_END_STATE
-		*event = yaml_event_t{
-			typ:        yaml_STREAM_END_EVENT,
-			start_mark: token.start_mark,
-			end_mark:   token.end_mark,
-		}
-		skip_token(parser)
-	}
-
-	return true
-}
-
-// Parse the productions:
-// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
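-				// (Editorial example, not upstream text.) With a directive
-				// such as "%TAG !e! tag:example.com,2000:" in effect, handle
-				// "!e!" plus suffix "app" resolves to the full tag
-				// "tag:example.com,2000:app".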
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? 
-// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. 
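-// (Editorial note, not upstream text.) Empty scalars stand in for nodes that
-// the grammar allows but the input omits, e.g. the missing value in "key:";
-// the event spans zero characters at the given mark.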
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/readerc.go b/vendor/github.com/zclconf/go-cty-yaml/readerc.go deleted file mode 100644 index 7c1f5fac..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/readerc.go +++ /dev/null @@ -1,412 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. 
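-// (Editorial note, not upstream text.) These are the UTF-8, UTF-16LE and
-// UTF-16BE byte order marks that yaml_parser_determine_encoding matches
-// against the first bytes of the stream.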
-const (
-	bom_UTF8    = "\xef\xbb\xbf"
-	bom_UTF16LE = "\xff\xfe"
-	bom_UTF16BE = "\xfe\xff"
-)
-
-// Determine the input stream encoding by checking the BOM symbol. If no BOM is
-// found, the UTF-8 encoding is assumed. Returns true on success, false on failure.
-func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
-	// Ensure that we had enough bytes in the raw buffer.
-	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
-		if !yaml_parser_update_raw_buffer(parser) {
-			return false
-		}
-	}
-
-	// Determine the encoding.
-	buf := parser.raw_buffer
-	pos := parser.raw_buffer_pos
-	avail := len(buf) - pos
-	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
-		parser.encoding = yaml_UTF16LE_ENCODING
-		parser.raw_buffer_pos += 2
-		parser.offset += 2
-	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
-		parser.encoding = yaml_UTF16BE_ENCODING
-		parser.raw_buffer_pos += 2
-		parser.offset += 2
-	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
-		parser.encoding = yaml_UTF8_ENCODING
-		parser.raw_buffer_pos += 3
-		parser.offset += 3
-	} else {
-		parser.encoding = yaml_UTF8_ENCODING
-	}
-	return true
-}
-
-// Update the raw buffer.
-func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
-	size_read := 0
-
-	// Return if the raw buffer is full.
-	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
-		return true
-	}
-
-	// Return on EOF.
-	if parser.eof {
-		return true
-	}
-
-	// Move the remaining bytes in the raw buffer to the beginning.
-	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
-		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
-	}
-	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
-	parser.raw_buffer_pos = 0
-
-	// Call the read handler to fill the buffer.
-	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
-	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
-	if err == io.EOF {
-		parser.eof = true
-	} else if err != nil {
-		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
-	}
-	return true
-}
-
-// Ensure that the buffer contains at least `length` characters.
-// Return true on success, false on failure.
-//
-// The length is supposed to be significantly less than the buffer size.
-func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
-	if parser.read_handler == nil {
-		panic("read handler must be set")
-	}
-
-	// [Go] This function was changed to guarantee the requested length size at EOF.
-	// The fact we need to do this is pretty awful, but the description above implies
-	// that it must be the case, and there are tests that rely on it.
-
-	// If the EOF flag is set and the raw buffer is empty, do nothing.
-	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
-		// [Go] ACTUALLY! Read the documentation of this function above.
-		// This is just broken. To return true, we need to have the
-		// given length in the buffer. Not doing that means every single
-		// check that calls this function to make sure the buffer has a
-		// given length is (in Go) panicking, or (in C) accessing invalid
-		// memory.
-		//return true
-	}
-
-	// Return if the buffer contains enough characters.
-	if parser.unread >= length {
-		return true
-	}
-
-	// Determine the input encoding if it is not known yet.
- if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. 
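-				// (Editorial note, not upstream text.) This check rejects
-				// overlong encodings, e.g. 0xC0 0x80 for U+0000, which
-				// RFC 3629 forbids because a shorter form exists.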
- switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. 
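-			// (Editorial note, not upstream text.) From here on, the decoded
-			// rune is re-encoded as UTF-8 into parser.buffer, so the scanner
-			// always consumes UTF-8 regardless of the input encoding.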
- parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - // [Go] Read the documentation of this function above. To return true, - // we need to have the given length in the buffer. Not doing that means - // every single check that calls this function to make sure the buffer - // has a given length is Go) panicking; or C) accessing invalid memory. - // This happens here due to the EOF above breaking early. - for buffer_len < length { - parser.buffer[buffer_len] = 0 - buffer_len++ - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/resolve.go b/vendor/github.com/zclconf/go-cty-yaml/resolve.go deleted file mode 100644 index 138c7aaa..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/resolve.go +++ /dev/null @@ -1,293 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - "time" - - "github.com/zclconf/go-cty/cty" -) - -type resolveMapItem struct { - value cty.Value - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -// Numeric literal regular expressions from the YAML 1.2 spec: -// -// https://yaml.org/spec/1.2/spec.html#id2805071 -var integerLiteralRegexp = regexp.MustCompile(`` + - // start of string, optional sign, and one of: - `\A[-+]?(` + - // octal literal with 0o prefix and optional _ spaces - `|0o[0-7_]+` + - // decimal literal and optional _ spaces - `|[0-9_]+` + - // hexadecimal literal with 0x prefix and optional _ spaces - `|0x[0-9a-fA-F_]+` + - // end of group, and end of string - `)\z`, -) -var floatLiteralRegexp = regexp.MustCompile( - `\A[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?\z`, -) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' 
// Float (potentially in map) - - var resolveMapList = []struct { - v cty.Value - tag string - l []string - }{ - {cty.True, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {cty.True, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {cty.True, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {cty.False, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {cty.False, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {cty.False, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {cty.NullVal(cty.DynamicPseudoType), yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {cty.NegativeInfinity, yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG, yaml_BINARY_TAG: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) - -func (c *Converter) resolveScalar(tag string, src string, style yaml_scalar_style_t) (cty.Value, error) { - if !resolvableTag(tag) { - return cty.NilVal, fmt.Errorf("unsupported tag %q", tag) - } - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if src != "" { - hint = resolveTable[src[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE || style == yaml_DOUBLE_QUOTED_SCALAR_STYLE { - return cty.StringVal(src), nil - } - - // Handle things we can lookup in a map. - if item, ok := resolveMap[src]; ok { - return item.value, nil - } - - if tag == "" { - for _, nan := range []string{".nan", ".NaN", ".NAN"} { - if src == nan { - // cty cannot represent NaN, so this is an error - return cty.NilVal, fmt.Errorf("floating point NaN is not supported") - } - } - } - - // Base 60 floats are intentionally not supported. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - if numberVal, err := cty.ParseNumberVal(src); err == nil { - return numberVal, nil - } - - case 'D', 'S': - // Int, float, or timestamp. - // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. - if tag == "" || tag == yaml_TIMESTAMP_TAG { - t, ok := parseTimestamp(src) - if ok { - // cty has no timestamp type, but its functions stdlib - // conventionally uses strings in an RFC3339 encoding - // to represent time, so we'll follow that convention here. 
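-				// (Editorial example, not upstream text.) The unquoted scalar
-				// 2001-12-14 becomes cty.StringVal("2001-12-14T00:00:00Z").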
-				return cty.StringVal(t.Format(time.RFC3339)), nil
-			}
-		}
-
-		if integerLiteralRegexp.MatchString(src) {
-			tag = yaml_INT_TAG // will handle parsing below in our tag switch
-			break
-		}
-		if floatLiteralRegexp.MatchString(src) {
-			tag = yaml_FLOAT_TAG // will handle parsing below in our tag switch
-			break
-		}
-	default:
-		panic(fmt.Sprintf("cannot resolve tag %q with source %q", tag, src))
-	}
-	}
-
-	if tag == "" && src == "<<" {
-		return mergeMappingVal, nil
-	}
-
-	switch tag {
-	case yaml_STR_TAG, yaml_BINARY_TAG:
-		// If it's binary then we want to keep the base64 representation, because
-		// cty has no binary type, but we will check that it's actually base64.
-		if tag == yaml_BINARY_TAG {
-			_, err := base64.StdEncoding.DecodeString(src)
-			if err != nil {
-				return cty.NilVal, fmt.Errorf("cannot parse %q as %s: not valid base64", src, tag)
-			}
-		}
-		return cty.StringVal(src), nil
-	case yaml_BOOL_TAG:
-		item, ok := resolveMap[src]
-		if !ok || item.tag != yaml_BOOL_TAG {
-			return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag)
-		}
-		return item.value, nil
-	case yaml_FLOAT_TAG, yaml_INT_TAG:
-		// Note: We don't actually check that a value tagged INT is a whole
-		// number here. We could, but cty generally doesn't care about the
-		// int/float distinction, so we'll just be generous and accept it.
-		plain := strings.Replace(src, "_", "", -1)
-		if numberVal, err := cty.ParseNumberVal(plain); err == nil { // handles decimal integers and floats
-			return numberVal, nil
-		}
-		if intv, err := strconv.ParseInt(plain, 0, 64); err == nil { // handles 0x and 00 prefixes
-			return cty.NumberIntVal(intv), nil
-		}
-		if uintv, err := strconv.ParseUint(plain, 0, 64); err == nil { // handles 0x and 00 prefixes
-			return cty.NumberUIntVal(uintv), nil
-		}
-		return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag)
-	case yaml_TIMESTAMP_TAG:
-		t, ok := parseTimestamp(src)
-		if ok {
-			// cty has no timestamp type, but its functions stdlib
-			// conventionally uses strings in an RFC3339 encoding
-			// to represent time, so we'll follow that convention here.
-			return cty.StringVal(t.Format(time.RFC3339)), nil
-		}
-		return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag)
-	case yaml_NULL_TAG:
-		return cty.NullVal(cty.DynamicPseudoType), nil
-	case "":
-		return cty.StringVal(src), nil
-	default:
-		return cty.NilVal, fmt.Errorf("unsupported tag %q", tag)
-	}
-}
-
-// encodeBase64 encodes s as base64 that is broken up into multiple lines
-// as appropriate for the resulting length.
-func encodeBase64(s string) string {
-	const lineLen = 70
-	encLen := base64.StdEncoding.EncodedLen(len(s))
-	lines := encLen/lineLen + 1
-	buf := make([]byte, encLen*2+lines)
-	in := buf[0:encLen]
-	out := buf[encLen:]
-	base64.StdEncoding.Encode(in, []byte(s))
-	k := 0
-	for i := 0; i < len(in); i += lineLen {
-		j := i + lineLen
-		if j > len(in) {
-			j = len(in)
-		}
-		k += copy(out[k:], in[i:j])
-		if lines > 1 {
-			out[k] = '\n'
-			k++
-		}
-	}
-	return string(out[:k])
-}
-
-// This is a subset of the formats allowed by the regular expression
-// defined at http://yaml.org/type/timestamp.html.
-var allowedTimestampFormats = []string{
-	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
-	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
-	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
-	"2006-1-2",                        // date only
-	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
-	// from the set of examples.
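-	// (Editorial note, not upstream text.) Go's reference layouts "1" and
-	// "2" also match two-digit months and days, so both "2001-1-2" and
-	// "2001-12-14" parse with the "2006-1-2" layout above.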
-}
-
-// parseTimestamp parses s as a timestamp string and
-// returns the timestamp and reports whether it succeeded.
-// Timestamp formats are defined at http://yaml.org/type/timestamp.html
-func parseTimestamp(s string) (time.Time, bool) {
-	// TODO write code to check all the formats supported by
-	// http://yaml.org/type/timestamp.html instead of using time.Parse.
-
-	// Quick check: all date formats start with YYYY-.
-	i := 0
-	for ; i < len(s); i++ {
-		if c := s[i]; c < '0' || c > '9' {
-			break
-		}
-	}
-	if i != 4 || i == len(s) || s[i] != '-' {
-		return time.Time{}, false
-	}
-	for _, format := range allowedTimestampFormats {
-		if t, err := time.Parse(format, s); err == nil {
-			return t, true
-		}
-	}
-	return time.Time{}, false
-}
-
-type mergeMapping struct{}
-
-var mergeMappingTy = cty.Capsule("merge mapping", reflect.TypeOf(mergeMapping{}))
-var mergeMappingVal = cty.CapsuleVal(mergeMappingTy, &mergeMapping{})
diff --git a/vendor/github.com/zclconf/go-cty-yaml/scannerc.go b/vendor/github.com/zclconf/go-cty-yaml/scannerc.go
deleted file mode 100644
index 077fd1dd..00000000
--- a/vendor/github.com/zclconf/go-cty-yaml/scannerc.go
+++ /dev/null
@@ -1,2696 +0,0 @@
-package yaml
-
-import (
-	"bytes"
-	"fmt"
-)
-
-// Introduction
-// ************
-//
-// The following notes assume that you are familiar with the YAML specification
-// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
-// some cases we are less restrictive than it requires.
-//
-// The process of transforming a YAML stream into a sequence of events is
-// divided into two steps: Scanning and Parsing.
-//
-// The Scanner transforms the input stream into a sequence of tokens, while the
-// parser transforms the sequence of tokens produced by the Scanner into a
-// sequence of parsing events.
-//
-// The Scanner is rather clever and complicated. The Parser, on the contrary,
-// is a straightforward implementation of a recursive-descent parser (or an
-// LL(1) parser, as it is usually called).
-//
-// Actually there are two issues of Scanning that might be called "clever"; the
-// rest is quite straightforward. The issues are "block collection start" and
-// "simple keys". Both issues are explained below in detail.
-//
-// Here the Scanning step is explained and implemented. We start with the list
-// of all the tokens produced by the Scanner together with short descriptions.
-//
-// Now, tokens:
-//
-//      STREAM-START(encoding)          # The stream start.
-//      STREAM-END                      # The stream end.
-//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
-//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
-//      DOCUMENT-START                  # '---'
-//      DOCUMENT-END                    # '...'
-//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
-//      BLOCK-MAPPING-START             # sequence or a block mapping.
-//      BLOCK-END                       # Indentation decrease.
-//      FLOW-SEQUENCE-START             # '['
-//      FLOW-SEQUENCE-END               # ']'
-//      FLOW-MAPPING-START              # '{'
-//      FLOW-MAPPING-END                # '}'
-//      BLOCK-ENTRY                     # '-'
-//      FLOW-ENTRY                      # ','
-//      KEY                             # '?' or nothing (simple keys).
-//      VALUE                           # ':'
-//      ALIAS(anchor)                   # '*anchor'
-//      ANCHOR(anchor)                  # '&anchor'
-//      TAG(handle,suffix)              # '!handle!suffix'
-//      SCALAR(value,style)             # A scalar.
-//
-// The following two tokens are "virtual" tokens denoting the beginning and the
-// end of the stream:
-//
-//      STREAM-START(encoding)
-//      STREAM-END
-//
-// We pass the information about the input stream encoding with the
-// STREAM-START token.
-//
-// The next two tokens are responsible for tags:
-//
-//      VERSION-DIRECTIVE(major,minor)
-//      TAG-DIRECTIVE(handle,prefix)
-//
-// Example:
-//
-//      %YAML   1.1
-//      %TAG    !       !foo
-//      %TAG    !yaml!  tag:yaml.org,2002:
-//      ---
-//
-// The corresponding sequence of tokens:
-//
-//      STREAM-START(utf-8)
-//      VERSION-DIRECTIVE(1,1)
-//      TAG-DIRECTIVE("!","!foo")
-//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
-//      DOCUMENT-START
-//      STREAM-END
-//
-// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
-// line.
-//
-// The document start and end indicators are represented by:
-//
-//      DOCUMENT-START
-//      DOCUMENT-END
-//
-// Note that if a YAML stream contains an implicit document (without '---'
-// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
-// produced.
-//
-// In the following examples, we present whole documents together with the
-// produced tokens.
-//
-// 1. An implicit document:
-//
-//      'a scalar'
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      SCALAR("a scalar",single-quoted)
-//      STREAM-END
-//
-// 2. An explicit document:
-//
-//      ---
-//      'a scalar'
-//      ...
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      DOCUMENT-START
-//      SCALAR("a scalar",single-quoted)
-//      DOCUMENT-END
-//      STREAM-END
-//
-// 3. Several documents in a stream:
-//
-//      'a scalar'
-//      ---
-//      'another scalar'
-//      ---
-//      'yet another scalar'
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      SCALAR("a scalar",single-quoted)
-//      DOCUMENT-START
-//      SCALAR("another scalar",single-quoted)
-//      DOCUMENT-START
-//      SCALAR("yet another scalar",single-quoted)
-//      STREAM-END
-//
-// We have already introduced the SCALAR token above. The following tokens are
-// used to describe aliases, anchors, tags, and scalars:
-//
-//      ALIAS(anchor)
-//      ANCHOR(anchor)
-//      TAG(handle,suffix)
-//      SCALAR(value,style)
-//
-// The following series of examples illustrates the usage of these tokens:
-//
-// 1. A recursive sequence:
-//
-//      &A [ *A ]
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      ANCHOR("A")
-//      FLOW-SEQUENCE-START
-//      ALIAS("A")
-//      FLOW-SEQUENCE-END
-//      STREAM-END
-//
-// 2. A tagged scalar:
-//
-//      !!float "3.14"  # A good approximation.
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      TAG("!!","float")
-//      SCALAR("3.14",double-quoted)
-//      STREAM-END
-//
-// 3. Various scalar styles:
-//
-//      --- # Implicit empty plain scalars do not produce tokens.
-//      --- a plain scalar
-//      --- 'a single-quoted scalar'
-//      --- "a double-quoted scalar"
-//      --- |-
-//        a literal scalar
-//      --- >-
-//        a folded
-//        scalar
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      DOCUMENT-START
-//      DOCUMENT-START
-//      SCALAR("a plain scalar",plain)
-//      DOCUMENT-START
-//      SCALAR("a single-quoted scalar",single-quoted)
-//      DOCUMENT-START
-//      SCALAR("a double-quoted scalar",double-quoted)
-//      DOCUMENT-START
-//      SCALAR("a literal scalar",literal)
-//      DOCUMENT-START
-//      SCALAR("a folded scalar",folded)
-//      STREAM-END
-//
-// Now it's time to review collection-related tokens. We will start with
-// flow collections:
-//
-//      FLOW-SEQUENCE-START
-//      FLOW-SEQUENCE-END
-//      FLOW-MAPPING-START
-//      FLOW-MAPPING-END
-//      FLOW-ENTRY
-//      KEY
-//      VALUE
-//
-// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
-// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
-// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally the
-// indicators '?' and ':', which are used for denoting mapping keys and values,
-// are represented by the KEY and VALUE tokens.
-//
-// The following examples show flow collections:
-//
-// 1. A flow sequence:
-//
-//      [item 1, item 2, item 3]
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      FLOW-SEQUENCE-START
-//      SCALAR("item 1",plain)
-//      FLOW-ENTRY
-//      SCALAR("item 2",plain)
-//      FLOW-ENTRY
-//      SCALAR("item 3",plain)
-//      FLOW-SEQUENCE-END
-//      STREAM-END
-//
-// 2. A flow mapping:
-//
-//      {
-//          a simple key: a value,  # Note that the KEY token is produced.
-//          ? a complex key: another value,
-//      }
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      FLOW-MAPPING-START
-//      KEY
-//      SCALAR("a simple key",plain)
-//      VALUE
-//      SCALAR("a value",plain)
-//      FLOW-ENTRY
-//      KEY
-//      SCALAR("a complex key",plain)
-//      VALUE
-//      SCALAR("another value",plain)
-//      FLOW-ENTRY
-//      FLOW-MAPPING-END
-//      STREAM-END
-//
-// A simple key is a key which is not denoted by the '?' indicator. Note that
-// the Scanner still produces the KEY token whenever it encounters a simple key.
-//
-// For scanning block collections, the following tokens are used (note that we
-// repeat KEY and VALUE here):
-//
-//      BLOCK-SEQUENCE-START
-//      BLOCK-MAPPING-START
-//      BLOCK-END
-//      BLOCK-ENTRY
-//      KEY
-//      VALUE
-//
-// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
-// increase that precedes a block collection (cf. the INDENT token in Python).
-// The token BLOCK-END denotes indentation decrease that ends a block collection
-// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
-// that make detection of these tokens more complex.
-//
-// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
-// '-', '?', and ':' correspondingly.
-//
-// The following examples show how the tokens BLOCK-SEQUENCE-START,
-// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
-//
-// 1. Block sequences:
-//
-//      - item 1
-//      - item 2
-//      -
-//        - item 3.1
-//        - item 3.2
-//      -
-//        key 1: value 1
-//        key 2: value 2
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 2",plain)
-//      BLOCK-ENTRY
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 3.1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 3.2",plain)
-//      BLOCK-END
-//      BLOCK-ENTRY
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("key 1",plain)
-//      VALUE
-//      SCALAR("value 1",plain)
-//      KEY
-//      SCALAR("key 2",plain)
-//      VALUE
-//      SCALAR("value 2",plain)
-//      BLOCK-END
-//      BLOCK-END
-//      STREAM-END
-//
-// 2. Block mappings:
-//
-//      a simple key: a value   # The KEY token is produced here.
-//      ? a complex key
-//      : another value
-//      a mapping:
-//        key 1: value 1
-//        key 2: value 2
-//      a sequence:
-//        - item 1
-//        - item 2
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("a simple key",plain)
-//      VALUE
-//      SCALAR("a value",plain)
-//      KEY
-//      SCALAR("a complex key",plain)
-//      VALUE
-//      SCALAR("another value",plain)
-//      KEY
-//      SCALAR("a mapping",plain)
-//      VALUE
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("key 1",plain)
-//      VALUE
-//      SCALAR("value 1",plain)
-//      KEY
-//      SCALAR("key 2",plain)
-//      VALUE
-//      SCALAR("value 2",plain)
-//      BLOCK-END
-//      KEY
-//      SCALAR("a sequence",plain)
-//      VALUE
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 2",plain)
-//      BLOCK-END
-//      BLOCK-END
-//      STREAM-END
-//
-// YAML does not always require a new block collection to start on a new
-// line. If the current line contains only '-', '?', and ':' indicators, a new
-// block collection may start at the current line. The following examples
-// illustrate this case:
-//
-// 1. Collections in a sequence:
-//
-//      - - item 1
-//        - item 2
-//      - key 1: value 1
-//        key 2: value 2
-//      - ? complex key
-//        : complex value
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 2",plain)
-//      BLOCK-END
-//      BLOCK-ENTRY
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("key 1",plain)
-//      VALUE
-//      SCALAR("value 1",plain)
-//      KEY
-//      SCALAR("key 2",plain)
-//      VALUE
-//      SCALAR("value 2",plain)
-//      BLOCK-END
-//      BLOCK-ENTRY
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("complex key")
-//      VALUE
-//      SCALAR("complex value")
-//      BLOCK-END
-//      BLOCK-END
-//      STREAM-END
-//
-// 2. Collections in a mapping:
-//
-//      ? a sequence
-//      : - item 1
-//        - item 2
-//      ? a mapping
-//      : key 1: value 1
-//        key 2: value 2
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("a sequence",plain)
-//      VALUE
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 2",plain)
-//      BLOCK-END
-//      KEY
-//      SCALAR("a mapping",plain)
-//      VALUE
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("key 1",plain)
-//      VALUE
-//      SCALAR("value 1",plain)
-//      KEY
-//      SCALAR("key 2",plain)
-//      VALUE
-//      SCALAR("value 2",plain)
-//      BLOCK-END
-//      BLOCK-END
-//      STREAM-END
-//
-// YAML also permits non-indented sequences if they are included in a block
-// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
-//
-//      key:
-//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
-//      - item 2
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("key",plain)
-//      VALUE
-//      BLOCK-ENTRY
-//      SCALAR("item 1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 2",plain)
-//      BLOCK-END
-//      STREAM-END
-//
-
-// Ensure that the buffer contains the required number of characters.
-// Return true on success, false on failure (reader error or memory error).
-func cache(parser *yaml_parser_t, length int) bool {
-	// [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
-	return parser.unread >= length || yaml_parser_update_buffer(parser, length)
-}
-
-// Advance the buffer pointer.
-func skip(parser *yaml_parser_t) {
-	parser.mark.index++
-	parser.mark.column++
-	parser.unread--
-	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
-}
-
-// Advance the buffer pointer past a line break and update the position mark.
-func skip_line(parser *yaml_parser_t) {
-	if is_crlf(parser.buffer, parser.buffer_pos) {
-		parser.mark.index += 2
-		parser.mark.column = 0
-		parser.mark.line++
-		parser.unread -= 2
-		parser.buffer_pos += 2
-	} else if is_break(parser.buffer, parser.buffer_pos) {
-		parser.mark.index++
-		parser.mark.column = 0
-		parser.mark.line++
-		parser.unread--
-		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
-	}
-}
-
-// Copy a character to a string buffer and advance pointers.
-func read(parser *yaml_parser_t, s []byte) []byte {
-	w := width(parser.buffer[parser.buffer_pos])
-	if w == 0 {
-		panic("invalid character sequence")
-	}
-	if len(s) == 0 {
-		s = make([]byte, 0, 32)
-	}
-	if w == 1 && len(s)+w <= cap(s) {
-		s = s[:len(s)+1]
-		s[len(s)-1] = parser.buffer[parser.buffer_pos]
-		parser.buffer_pos++
-	} else {
-		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
- parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - // Check if we really need to fetch more tokens. - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - // Queue is empty. - need_more_tokens = true - } else { - // Check if any potential simple key may occupy the head position. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - if simple_key.possible && simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - - // We are finished. - if !need_more_tokens { - break - } - // Fetch the next token. 
- if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Remove obsolete potential simple keys. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? 
- if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -// Check the list of potential simple keys and remove the positions that -// cannot contain simple keys anymore. -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - // Check for a potential simple key for each flow level. - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - // The specification requires that a simple key - // - // - is limited to a single line, - // - is shorter than 1024 characters. 
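-		// The check below enforces both rules at once: a saved key goes
-		// stale once the scanner has moved to a later line, or more than
-		// 1024 characters past the index where the key started.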
- if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { - - // Check if the potential simple key to be removed is required. - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - } - } - return true -} - -// Check if a simple key may start at the current position and add it if -// needed. -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - } - simple_key.mark = parser.mark - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - return true -} - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // Increase the flow level. - parser.flow_level++ - return true -} - -// Decrease the flow level. -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] - } - return true -} - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - - // Create a token and insert it into the queue. - token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - // In the flow context, do nothing. 
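-	// Inside flow collections the brackets themselves delimit nesting, so
-	// indentation carries no structure there and the indent stack is left
-	// untouched.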
- if parser.flow_level > 0 { - return true - } - - // Loop through the indentation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // The indicators '[' and '{' may start a simple key. 
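-	// For example, in "[a, b]: value" the whole flow sequence is the
-	// mapping key, so the position of the '[' must be remembered as a
-	// potential simple key start.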
- if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. 
- if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if simple_key.possible { - // Create the KEY token and insert it into the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. - token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. 
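-	// For example, in "!!str 42: value" the tagged scalar is the mapping
-	// key, and the key's span begins at the leading '!' of the tag.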
- if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. 
- } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. 
- if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. - length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. 
- if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. 
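-			// At this point handle is []byte{'!'} and suffix is empty, so
-			// the swap below yields handle "" and suffix "!".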
- if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. 
- increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. - for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. 
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the indentation. - if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF. 
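-		// is_z matches the NUL byte that the reader appends at the end of
-		// the input, so hitting it here means the quoted scalar was never
-		// closed.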
- if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. - leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // It is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. - switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. - if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexadecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character.
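The branch that follows writes the scanned `\x`, `\u`, or `\U` escape value as one to four UTF-8 bytes by hand, after rejecting surrogate halves and out-of-range code points. A hedged standard-library equivalent for comparison (`encodeEscape` is a hypothetical helper, not the scanner's API; requires Go 1.18+ for `utf8.AppendRune`):

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// encodeEscape appends the UTF-8 encoding of a numeric escape value,
// applying the same validity check as the scanner: surrogate halves
// (U+D800..U+DFFF) and values above U+10FFFF are rejected.
func encodeEscape(s []byte, value int) ([]byte, error) {
	if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
		return s, fmt.Errorf("invalid Unicode character escape code %#x", value)
	}
	return utf8.AppendRune(s, rune(value)), nil
}

func main() {
	s, err := encodeEscape(nil, 0x2028) // LS, same bytes as the 'L' escape: e2 80 a8
	fmt.Printf("% x, err=%v\n", s, err)
}
```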
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' 
&& - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab characters that abuse indentation. - if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. - if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/writerc.go b/vendor/github.com/zclconf/go-cty-yaml/writerc.go deleted file mode 100644 index a2dde608..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/writerc.go +++ /dev/null @@ -1,26 +0,0 @@ -package yaml - -// Set the writer error and return false. 
-func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yaml.go b/vendor/github.com/zclconf/go-cty-yaml/yaml.go deleted file mode 100644 index 2c314cc1..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/yaml.go +++ /dev/null @@ -1,215 +0,0 @@ -// Package yaml can marshal and unmarshal cty values in YAML format. -package yaml - -import ( - "errors" - "fmt" - "reflect" - "strings" - "sync" - - "github.com/zclconf/go-cty/cty" -) - -// Unmarshal reads the document found within the given source buffer -// and attempts to convert it into a value conforming to the given type -// constraint. -// -// This is an alias for Unmarshal on the predefined Converter in "Standard". -// -// An error is returned if the given source contains any YAML document -// delimiters. -func Unmarshal(src []byte, ty cty.Type) (cty.Value, error) { - return Standard.Unmarshal(src, ty) -} - -// Marshal serializes the given value into a YAML document, using a fixed -// mapping from cty types to YAML constructs. -// -// This is an alias for Marshal on the predefined Converter in "Standard". -// -// Note that unlike the function of the same name in the cty JSON package, -// this does not take a type constraint and therefore the YAML serialization -// cannot preserve late-bound type information in the serialization to be -// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type -// constraint given to Unmarshal will be decoded as if the corresponding portion -// of the input were processed with ImpliedType to find a target type. -func Marshal(v cty.Value) ([]byte, error) { - return Standard.Marshal(v) -} - -// ImpliedType analyzes the given source code and returns a suitable type that -// it could be decoded into. -// -// For a converter that is using standard YAML rather than cty-specific custom -// tags, only a subset of cty types can be produced: strings, numbers, bools, -// tuple types, and object types. -// -// This is an alias for ImpliedType on the predefined Converter in "Standard". -func ImpliedType(src []byte) (cty.Type, error) { - return Standard.ImpliedType(src) -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. 
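A usage sketch of the public entry points documented above (`ImpliedType`, `Unmarshal`, and `Marshal` on the predefined `Standard` converter), before the field-mapping internals below; it assumes the package is imported from `github.com/zclconf/go-cty-yaml` and that the sample document implies an object type per the stated rules:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	yaml "github.com/zclconf/go-cty-yaml"
)

func main() {
	src := []byte("name: example\ncount: 2\n")

	// Infer a cty type from the document structure...
	ty, err := yaml.ImpliedType(src)
	if err != nil {
		panic(err)
	}
	// ...then decode against that type constraint.
	v, err := yaml.Unmarshal(src, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.GetAttr("name")) // cty.StringVal("example")

	// Round-trip back to YAML with the fixed type mapping.
	out, err := yaml.Marshal(v)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)
}
```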
-type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - Id int - - // Inline holds the field index if the field is part of an inlined struct. - Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.Id = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlh.go deleted file mode 100644 index e25cee56..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/yamlh.go +++ /dev/null @@ -1,738 +0,0 @@ -package yaml - -import ( - "fmt" - "io" -) - -// The version directive data. 
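The `getStructInfo` parsing above follows the mgo/bson tag conventions: the first comma-separated element renames the key (defaulting to the lowercased field name), and the remaining flags set the `fieldInfo` options. A hedged illustration with a hypothetical struct:

```go
// How yaml struct tags map onto the options parsed by getStructInfo.
type Example struct {
	Name  string            `yaml:"name"`            // Key = "name"
	Count int               `yaml:"count,omitempty"` // OmitEmpty = true
	Tags  []string          `yaml:"tags,flow"`       // Flow = true
	Extra map[string]string `yaml:",inline"`         // recorded as the InlineMap
	Skip  string            `yaml:"-"`               // field ignored entirely
}
```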
-type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. -} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - yaml_PLAIN_SCALAR_STYLE // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. -const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. - yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
- yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token. -) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. 
-) - -var eventStrings = []string{ - yaml_NO_EVENT: "none", - yaml_STREAM_START_EVENT: "stream start", - yaml_STREAM_END_EVENT: "stream end", - yaml_DOCUMENT_START_EVENT: "document start", - yaml_DOCUMENT_END_EVENT: "document end", - yaml_ALIAS_EVENT: "alias", - yaml_SCALAR_EVENT: "scalar", - yaml_SEQUENCE_START_EVENT: "sequence start", - yaml_SEQUENCE_END_EVENT: "sequence end", - yaml_MAPPING_START_EVENT: "mapping start", - yaml_MAPPING_END_EVENT: "mapping end", -} - -func (e yaml_event_type_t) String() string { - if e < 0 || int(e) >= len(eventStrings) { - return fmt.Sprintf("unknown event %d", e) - } - return eventStrings[e] -} - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). - version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. 
-type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. -type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. - version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
- yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. - yaml_PARSE_END_STATE // Expect nothing. -) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occurred. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_reader io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. - - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. - tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. 
- yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. -type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_writer io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. - unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? 
- flow_plain_allowed bool // Can the scalar be expressed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? - - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go deleted file mode 100644 index 8110ce3c..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go +++ /dev/null @@ -1,173 +0,0 @@ -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. - // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. - output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. -func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM.
-func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. -func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. -func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. - if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go deleted file mode 100644 index 1eb99f28..00000000 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -// Package msgpack provides functions for serializing cty values in the -// msgpack encoding, and decoding them again. -// -// If the same type information is provided both at encoding and decoding time -// then values can be round-tripped without loss, except for capsule types -// which are not currently supported. 
-// -// If any unknown values are passed to Marshal then they will be represented -// using a msgpack extension with type code zero, which is understood by -// the Unmarshal function within this package but will not be understood by -// a generic (non-cty-aware) msgpack decoder. Ensure that no unknown values -// are used if interoperability with other msgpack implementations is -// required. -package msgpack diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go deleted file mode 100644 index 9a4e94c2..00000000 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go +++ /dev/null @@ -1,31 +0,0 @@ -package msgpack - -import ( - "bytes" - - "github.com/vmihailenco/msgpack/v4" - "github.com/zclconf/go-cty/cty" -) - -type dynamicVal struct { - Value cty.Value - Path cty.Path -} - -func (dv *dynamicVal) MarshalMsgpack() ([]byte, error) { - // Rather than defining a msgpack-specific serialization of types, - // instead we use the existing JSON serialization. - typeJSON, err := dv.Value.Type().MarshalJSON() - if err != nil { - return nil, dv.Path.NewErrorf("failed to serialize type: %s", err) - } - var buf bytes.Buffer - enc := msgpack.NewEncoder(&buf) - enc.EncodeArrayLen(2) - enc.EncodeBytes(typeJSON) - err = marshal(dv.Value, dv.Value.Type(), dv.Path, enc) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go deleted file mode 100644 index 6db0815e..00000000 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go +++ /dev/null @@ -1,8 +0,0 @@ -package msgpack - -import ( - "math" -) - -var negativeInfinity = math.Inf(-1) -var positiveInfinity = math.Inf(1) diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go deleted file mode 100644 index 2c4da8b5..00000000 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go +++ /dev/null @@ -1,212 +0,0 @@ -package msgpack - -import ( - "bytes" - "math/big" - "sort" - - "github.com/vmihailenco/msgpack/v4" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// Marshal produces a msgpack serialization of the given value that -// can be decoded into the given type later using Unmarshal. -// -// The given value must conform to the given type, or an error will -// be returned. -func Marshal(val cty.Value, ty cty.Type) ([]byte, error) { - errs := val.Type().TestConformance(ty) - if errs != nil { - // Attempt a conversion - var err error - val, err = convert.Convert(val, ty) - if err != nil { - return nil, err - } - } - - // From this point onward, val can be assumed to be conforming to t. - - var path cty.Path - var buf bytes.Buffer - enc := msgpack.NewEncoder(&buf) - enc.UseCompactEncoding(true) - - err := marshal(val, ty, path, enc) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func marshal(val cty.Value, ty cty.Type, path cty.Path, enc *msgpack.Encoder) error { - if val.IsMarked() { - return path.NewErrorf("value has marks, so it cannot be serialized") - } - - // If we're going to decode as DynamicPseudoType then we need to save - // dynamic type information to recover the real type. 
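As `dynamicVal.MarshalMsgpack` above shows, a value whose type constraint is `cty.DynamicPseudoType` travels as a two-element msgpack array: the JSON serialization of its real type, then the value encoded per that type. A hedged sketch of reading that envelope back (`decodeDynamicEnvelope` is a hypothetical helper; `UnmarshalType` is from the go-cty JSON package):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack/v4"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

// decodeDynamicEnvelope recovers just the dynamically-carried type
// from the [typeJSON, value] pair written by marshalDynamic.
func decodeDynamicEnvelope(b []byte) error {
	dec := msgpack.NewDecoder(bytes.NewReader(b))
	n, err := dec.DecodeArrayLen()
	if err != nil {
		return err
	}
	if n != 2 {
		return fmt.Errorf("expected a two-element array, got %d", n)
	}
	typeJSON, err := dec.DecodeBytes()
	if err != nil {
		return err
	}
	ty, err := ctyjson.UnmarshalType(typeJSON)
	if err != nil {
		return err
	}
	fmt.Println("dynamic value of type", ty.FriendlyName())
	return dec.Skip() // the value itself follows, encoded per ty
}
```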
- if ty == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { - return marshalDynamic(val, path, enc) - } - - if !val.IsKnown() { - err := enc.Encode(unknownVal) - if err != nil { - return path.NewError(err) - } - return nil - } - if val.IsNull() { - err := enc.EncodeNil() - if err != nil { - return path.NewError(err) - } - return nil - } - - // The caller should've guaranteed that the given val is conformant with - // the given type ty, so we'll proceed under that assumption here. - switch { - case ty.IsPrimitiveType(): - switch ty { - case cty.String: - err := enc.EncodeString(val.AsString()) - if err != nil { - return path.NewError(err) - } - return nil - case cty.Number: - var err error - switch { - case val.RawEquals(cty.PositiveInfinity): - err = enc.EncodeFloat64(positiveInfinity) - case val.RawEquals(cty.NegativeInfinity): - err = enc.EncodeFloat64(negativeInfinity) - default: - bf := val.AsBigFloat() - if iv, acc := bf.Int64(); acc == big.Exact { - err = enc.EncodeInt(iv) - } else if fv, acc := bf.Float64(); acc == big.Exact { - err = enc.EncodeFloat64(fv) - } else { - err = enc.EncodeString(bf.Text('f', -1)) - } - } - if err != nil { - return path.NewError(err) - } - return nil - case cty.Bool: - err := enc.EncodeBool(val.True()) - if err != nil { - return path.NewError(err) - } - return nil - default: - panic("unsupported primitive type") - } - case ty.IsListType(), ty.IsSetType(): - enc.EncodeArrayLen(val.LengthInt()) - ety := ty.ElementType() - it := val.ElementIterator() - path := append(path, nil) // local override of 'path' with extra element - for it.Next() { - ek, ev := it.Element() - path[len(path)-1] = cty.IndexStep{ - Key: ek, - } - err := marshal(ev, ety, path, enc) - if err != nil { - return err - } - } - return nil - case ty.IsMapType(): - enc.EncodeMapLen(val.LengthInt()) - ety := ty.ElementType() - it := val.ElementIterator() - path := append(path, nil) // local override of 'path' with extra element - for it.Next() { - ek, ev := it.Element() - path[len(path)-1] = cty.IndexStep{ - Key: ek, - } - var err error - err = marshal(ek, ek.Type(), path, enc) - if err != nil { - return err - } - err = marshal(ev, ety, path, enc) - if err != nil { - return err - } - } - return nil - case ty.IsTupleType(): - etys := ty.TupleElementTypes() - it := val.ElementIterator() - path := append(path, nil) // local override of 'path' with extra element - i := 0 - enc.EncodeArrayLen(len(etys)) - for it.Next() { - ety := etys[i] - ek, ev := it.Element() - path[len(path)-1] = cty.IndexStep{ - Key: ek, - } - err := marshal(ev, ety, path, enc) - if err != nil { - return err - } - i++ - } - return nil - case ty.IsObjectType(): - atys := ty.AttributeTypes() - path := append(path, nil) // local override of 'path' with extra element - - names := make([]string, 0, len(atys)) - for k := range atys { - names = append(names, k) - } - sort.Strings(names) - - enc.EncodeMapLen(len(names)) - - for _, k := range names { - aty := atys[k] - av := val.GetAttr(k) - path[len(path)-1] = cty.GetAttrStep{ - Name: k, - } - var err error - err = marshal(cty.StringVal(k), cty.String, path, enc) - if err != nil { - return err - } - err = marshal(av, aty, path, enc) - if err != nil { - return err - } - } - return nil - case ty.IsCapsuleType(): - return path.NewErrorf("capsule types not supported for msgpack encoding") - default: - // should never happen - return path.NewErrorf("cannot msgpack-serialize %s", ty.FriendlyName()) - } -} - -// marshalDynamic adds an extra wrapping object containing dynamic 
type -// information for the given value. -func marshalDynamic(val cty.Value, path cty.Path, enc *msgpack.Encoder) error { - dv := dynamicVal{ - Value: val, - Path: path, - } - return enc.Encode(&dv) -} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go deleted file mode 100644 index a169f28f..00000000 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go +++ /dev/null @@ -1,167 +0,0 @@ -package msgpack - -import ( - "bytes" - "fmt" - "io" - - "github.com/vmihailenco/msgpack/v4" - msgpackcodes "github.com/vmihailenco/msgpack/v4/codes" - "github.com/zclconf/go-cty/cty" -) - -// ImpliedType returns the cty Type implied by the structure of the given -// msgpack-compliant buffer. This function implements the default type mapping -// behavior used when decoding arbitrary msgpack without explicit cty Type -// information. -// -// The rules are as follows: -// -// msgpack strings, numbers and bools map to their equivalent primitive type in -// cty. -// -// msgpack maps become cty object types, with the attributes defined by the -// map keys and the types of their values. -// -// msgpack arrays become cty tuple types, with the elements defined by the -// types of the array members. -// -// Any nulls are typed as DynamicPseudoType, so callers of this function -// must be prepared to deal with this. Callers that do not wish to deal with -// dynamic typing should not use this function and should instead describe -// their required types explicitly with a cty.Type instance when decoding. -// -// Any unknown values are similarly typed as DynamicPseudoType, because these -// do not carry type information on the wire. -// -// Any parse errors will be returned as an error, and the type will be the -// invalid value cty.NilType. -func ImpliedType(buf []byte) (cty.Type, error) { - r := bytes.NewReader(buf) - dec := msgpack.NewDecoder(r) - - ty, err := impliedType(dec) - if err != nil { - return cty.NilType, err - } - - // We must now be at the end of the buffer - err = dec.Skip() - if err != io.EOF { - return ty, fmt.Errorf("extra bytes after msgpack value") - } - - return ty, nil -} - -func impliedType(dec *msgpack.Decoder) (cty.Type, error) { - // If this function returns with a nil error then it must have already - // consumed the next value from the decoder, since when called recursively - // the caller will be expecting to find a following value here. 
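A usage sketch of `ImpliedType` as documented above: a generic msgpack document carrying no cty type information maps per the stated rules, with maps becoming object types, arrays becoming tuple types, and scalars becoming the corresponding primitives (the sample values are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/vmihailenco/msgpack/v4"
	"github.com/zclconf/go-cty/cty"
	ctymsgpack "github.com/zclconf/go-cty/cty/msgpack"
)

func main() {
	// Encode an arbitrary document with the generic msgpack encoder.
	buf, err := msgpack.Marshal(map[string]interface{}{
		"name":  "example",
		"count": 2,
		"tags":  []interface{}{"a", "b"},
	})
	if err != nil {
		panic(err)
	}

	ty, err := ctymsgpack.ImpliedType(buf)
	if err != nil {
		panic(err)
	}
	want := cty.Object(map[string]cty.Type{
		"name":  cty.String,
		"count": cty.Number,
		"tags":  cty.Tuple([]cty.Type{cty.String, cty.String}),
	})
	fmt.Println(ty.Equals(want)) // expected true under the rules above
}
```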
- - code, err := dec.PeekCode() - if err != nil { - return cty.NilType, err - } - - switch { - - case code == msgpackcodes.Nil || msgpackcodes.IsExt(code): - err := dec.Skip() - return cty.DynamicPseudoType, err - - case code == msgpackcodes.True || code == msgpackcodes.False: - _, err := dec.DecodeBool() - return cty.Bool, err - - case msgpackcodes.IsFixedNum(code): - _, err := dec.DecodeInt64() - return cty.Number, err - - case code == msgpackcodes.Int8 || code == msgpackcodes.Int16 || code == msgpackcodes.Int32 || code == msgpackcodes.Int64: - _, err := dec.DecodeInt64() - return cty.Number, err - - case code == msgpackcodes.Uint8 || code == msgpackcodes.Uint16 || code == msgpackcodes.Uint32 || code == msgpackcodes.Uint64: - _, err := dec.DecodeUint64() - return cty.Number, err - - case code == msgpackcodes.Float || code == msgpackcodes.Double: - _, err := dec.DecodeFloat64() - return cty.Number, err - - case msgpackcodes.IsString(code): - _, err := dec.DecodeString() - return cty.String, err - - case msgpackcodes.IsFixedMap(code) || code == msgpackcodes.Map16 || code == msgpackcodes.Map32: - return impliedObjectType(dec) - - case msgpackcodes.IsFixedArray(code) || code == msgpackcodes.Array16 || code == msgpackcodes.Array32: - return impliedTupleType(dec) - - default: - return cty.NilType, fmt.Errorf("unsupported msgpack code %#v", code) - } -} - -func impliedObjectType(dec *msgpack.Decoder) (cty.Type, error) { - // If we get in here then we've already peeked the next code and know - // it's some sort of map. - l, err := dec.DecodeMapLen() - if err != nil { - return cty.DynamicPseudoType, nil - } - - var atys map[string]cty.Type - - for i := 0; i < l; i++ { - // Read the map key first. We require maps to be strings, but msgpack - // doesn't so we're prepared to error here if not. - k, err := dec.DecodeString() - if err != nil { - return cty.DynamicPseudoType, err - } - - aty, err := impliedType(dec) - if err != nil { - return cty.DynamicPseudoType, err - } - - if atys == nil { - atys = make(map[string]cty.Type) - } - atys[k] = aty - } - - if len(atys) == 0 { - return cty.EmptyObject, nil - } - - return cty.Object(atys), nil -} - -func impliedTupleType(dec *msgpack.Decoder) (cty.Type, error) { - // If we get in here then we've already peeked the next code and know - // it's some sort of array. - l, err := dec.DecodeArrayLen() - if err != nil { - return cty.DynamicPseudoType, nil - } - - if l == 0 { - return cty.EmptyTuple, nil - } - - etys := make([]cty.Type, l) - - for i := 0; i < l; i++ { - ety, err := impliedType(dec) - if err != nil { - return cty.DynamicPseudoType, err - } - etys[i] = ety - } - - return cty.Tuple(etys), nil -} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go deleted file mode 100644 index 6507bc4b..00000000 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go +++ /dev/null @@ -1,16 +0,0 @@ -package msgpack - -type unknownType struct{} - -var unknownVal = unknownType{} - -// unknownValBytes is the raw bytes of the msgpack fixext1 value we -// write to represent an unknown value. It's an extension value of -// type zero whose value is irrelevant. Since it's irrelevant, we -// set it to a single byte whose value is also zero, since that's -// the most compact possible representation. 
-var unknownValBytes = []byte{0xd4, 0, 0} - -func (uv unknownType) MarshalMsgpack() ([]byte, error) { - return unknownValBytes, nil -} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go deleted file mode 100644 index 1ea0b0a2..00000000 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go +++ /dev/null @@ -1,334 +0,0 @@ -package msgpack - -import ( - "bytes" - - "github.com/vmihailenco/msgpack/v4" - msgpackCodes "github.com/vmihailenco/msgpack/v4/codes" - "github.com/zclconf/go-cty/cty" -) - -// Unmarshal interprets the given bytes as a msgpack-encoded cty Value of -// the given type, returning the result. -// -// If an error is returned, the error is written with a hypothetical -// end-user that wrote the msgpack file as its audience, using cty type -// system concepts rather than Go type system concepts. -func Unmarshal(b []byte, ty cty.Type) (cty.Value, error) { - r := bytes.NewReader(b) - dec := msgpack.NewDecoder(r) - - var path cty.Path - return unmarshal(dec, ty, path) -} - -func unmarshal(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { - peek, err := dec.PeekCode() - if err != nil { - return cty.DynamicVal, path.NewError(err) - } - if msgpackCodes.IsExt(peek) { - // We just assume _all_ extensions are unknown values, - // since we don't have any other extensions. - dec.Skip() // skip what we've peeked - return cty.UnknownVal(ty), nil - } - if ty == cty.DynamicPseudoType { - return unmarshalDynamic(dec, path) - } - if peek == msgpackCodes.Nil { - dec.Skip() // skip what we've peeked - return cty.NullVal(ty), nil - } - - switch { - case ty.IsPrimitiveType(): - val, err := unmarshalPrimitive(dec, ty, path) - if err != nil { - return cty.NilVal, err - } - return val, nil - case ty.IsListType(): - return unmarshalList(dec, ty.ElementType(), path) - case ty.IsSetType(): - return unmarshalSet(dec, ty.ElementType(), path) - case ty.IsMapType(): - return unmarshalMap(dec, ty.ElementType(), path) - case ty.IsTupleType(): - return unmarshalTuple(dec, ty.TupleElementTypes(), path) - case ty.IsObjectType(): - return unmarshalObject(dec, ty.AttributeTypes(), path) - default: - return cty.NilVal, path.NewErrorf("unsupported type %s", ty.FriendlyName()) - } -} - -func unmarshalPrimitive(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { - switch ty { - case cty.Bool: - rv, err := dec.DecodeBool() - if err != nil { - return cty.DynamicVal, path.NewErrorf("bool is required") - } - return cty.BoolVal(rv), nil - case cty.Number: - // Marshal will try int and float first, if the value can be - // losslessly represented in these encodings, and then fall - // back on a string if the number is too large or too precise. 
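
The int64/float64/string fallback that the comment above describes for cty.Number is visible at the wire level through this package's Marshal and Unmarshal entry points. A small sketch under the same module assumptions; the oversized literal is arbitrary:

package main

import (
	"fmt"
	"math/big"

	"github.com/zclconf/go-cty/cty"
	ctymsgpack "github.com/zclconf/go-cty/cty/msgpack"
)

func main() {
	// Fits an int64 exactly: written as a msgpack integer.
	small, err := ctymsgpack.Marshal(cty.NumberIntVal(42), cty.Number)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", small) // 2a (a positive fixint)

	// Exact in neither int64 nor float64: falls back to a decimal string.
	huge, _, err := big.ParseFloat("123456789123456789123456789.5", 10, 512, big.ToNearestEven)
	if err != nil {
		panic(err)
	}
	enc, err := ctymsgpack.Marshal(cty.NumberVal(huge), cty.Number)
	if err != nil {
		panic(err)
	}

	// Unmarshal parses the string form back into an equal cty number.
	v, err := ctymsgpack.Unmarshal(enc, cty.Number)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AsBigFloat().Text('f', -1))
}
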
-		peek, err := dec.PeekCode()
-		if err != nil {
-			return cty.DynamicVal, path.NewErrorf("number is required")
-		}
-
-		if msgpackCodes.IsFixedNum(peek) {
-			rv, err := dec.DecodeInt64()
-			if err != nil {
-				return cty.DynamicVal, path.NewErrorf("number is required")
-			}
-			return cty.NumberIntVal(rv), nil
-		}
-
-		switch peek {
-		case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64:
-			rv, err := dec.DecodeInt64()
-			if err != nil {
-				return cty.DynamicVal, path.NewErrorf("number is required")
-			}
-			return cty.NumberIntVal(rv), nil
-		case msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64:
-			rv, err := dec.DecodeUint64()
-			if err != nil {
-				return cty.DynamicVal, path.NewErrorf("number is required")
-			}
-			return cty.NumberUIntVal(rv), nil
-		case msgpackCodes.Float, msgpackCodes.Double:
-			rv, err := dec.DecodeFloat64()
-			if err != nil {
-				return cty.DynamicVal, path.NewErrorf("number is required")
-			}
-			return cty.NumberFloatVal(rv), nil
-		default:
-			rv, err := dec.DecodeString()
-			if err != nil {
-				return cty.DynamicVal, path.NewErrorf("number is required")
-			}
-			v, err := cty.ParseNumberVal(rv)
-			if err != nil {
-				return cty.DynamicVal, path.NewErrorf("number is required")
-			}
-			return v, nil
-		}
-	case cty.String:
-		rv, err := dec.DecodeString()
-		if err != nil {
-			return cty.DynamicVal, path.NewErrorf("string is required")
-		}
-		return cty.StringVal(rv), nil
-	default:
-		// should never happen
-		panic("unsupported primitive type")
-	}
-}
-
-func unmarshalList(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
-	length, err := dec.DecodeArrayLen()
-	if err != nil {
-		return cty.DynamicVal, path.NewErrorf("a list is required")
-	}
-
-	switch {
-	case length < 0:
-		return cty.NullVal(cty.List(ety)), nil
-	case length == 0:
-		return cty.ListValEmpty(ety), nil
-	}
-
-	vals := make([]cty.Value, 0, length)
-	path = append(path, nil)
-	for i := 0; i < length; i++ {
-		path[len(path)-1] = cty.IndexStep{
-			Key: cty.NumberIntVal(int64(i)),
-		}
-
-		val, err := unmarshal(dec, ety, path)
-		if err != nil {
-			return cty.DynamicVal, err
-		}
-
-		vals = append(vals, val)
-	}
-
-	return cty.ListVal(vals), nil
-}
-
-func unmarshalSet(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
-	length, err := dec.DecodeArrayLen()
-	if err != nil {
-		return cty.DynamicVal, path.NewErrorf("a set is required")
-	}
-
-	switch {
-	case length < 0:
-		return cty.NullVal(cty.Set(ety)), nil
-	case length == 0:
-		return cty.SetValEmpty(ety), nil
-	}
-
-	vals := make([]cty.Value, 0, length)
-	path = append(path, nil)
-	for i := 0; i < length; i++ {
-		path[len(path)-1] = cty.IndexStep{
-			Key: cty.NumberIntVal(int64(i)),
-		}
-
-		val, err := unmarshal(dec, ety, path)
-		if err != nil {
-			return cty.DynamicVal, err
-		}
-
-		vals = append(vals, val)
-	}
-
-	return cty.SetVal(vals), nil
-}
-
-func unmarshalMap(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
-	length, err := dec.DecodeMapLen()
-	if err != nil {
-		return cty.DynamicVal, path.NewErrorf("a map is required")
-	}
-
-	switch {
-	case length < 0:
-		return cty.NullVal(cty.Map(ety)), nil
-	case length == 0:
-		return cty.MapValEmpty(ety), nil
-	}
-
-	vals := make(map[string]cty.Value, length)
-	path = append(path, nil)
-	for i := 0; i < length; i++ {
-		key, err := dec.DecodeString()
-		if err != nil {
-			return cty.DynamicVal, path[:len(path)-1].NewErrorf("non-string key in map")
-		}
-
-		path[len(path)-1] = cty.IndexStep{
-			Key: cty.StringVal(key),
-		}
-
-		val, err := unmarshal(dec,
ety, path) - if err != nil { - return cty.DynamicVal, err - } - - vals[key] = val - } - - return cty.MapVal(vals), nil -} - -func unmarshalTuple(dec *msgpack.Decoder, etys []cty.Type, path cty.Path) (cty.Value, error) { - length, err := dec.DecodeArrayLen() - if err != nil { - return cty.DynamicVal, path.NewErrorf("a tuple is required") - } - - switch { - case length < 0: - return cty.NullVal(cty.Tuple(etys)), nil - case length == 0: - return cty.TupleVal(nil), nil - case length != len(etys): - return cty.DynamicVal, path.NewErrorf("a tuple of length %d is required", len(etys)) - } - - vals := make([]cty.Value, 0, length) - path = append(path, nil) - for i := 0; i < length; i++ { - path[len(path)-1] = cty.IndexStep{ - Key: cty.NumberIntVal(int64(i)), - } - ety := etys[i] - - val, err := unmarshal(dec, ety, path) - if err != nil { - return cty.DynamicVal, err - } - - vals = append(vals, val) - } - - return cty.TupleVal(vals), nil -} - -func unmarshalObject(dec *msgpack.Decoder, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { - length, err := dec.DecodeMapLen() - if err != nil { - return cty.DynamicVal, path.NewErrorf("an object is required") - } - - switch { - case length < 0: - return cty.NullVal(cty.Object(atys)), nil - case length == 0: - return cty.ObjectVal(nil), nil - case length != len(atys): - return cty.DynamicVal, path.NewErrorf("an object with %d attributes is required (%d given)", - len(atys), length) - } - - vals := make(map[string]cty.Value, length) - path = append(path, nil) - for i := 0; i < length; i++ { - key, err := dec.DecodeString() - if err != nil { - return cty.DynamicVal, path[:len(path)-1].NewErrorf("all keys must be strings") - } - - path[len(path)-1] = cty.IndexStep{ - Key: cty.StringVal(key), - } - aty, exists := atys[key] - if !exists { - return cty.DynamicVal, path.NewErrorf("unsupported attribute") - } - - val, err := unmarshal(dec, aty, path) - if err != nil { - return cty.DynamicVal, err - } - - vals[key] = val - } - - return cty.ObjectVal(vals), nil -} - -func unmarshalDynamic(dec *msgpack.Decoder, path cty.Path) (cty.Value, error) { - length, err := dec.DecodeArrayLen() - if err != nil { - return cty.DynamicVal, path.NewError(err) - } - - switch { - case length == -1: - return cty.NullVal(cty.DynamicPseudoType), nil - case length != 2: - return cty.DynamicVal, path.NewErrorf( - "dynamic value array must have exactly two elements", - ) - } - - typeJSON, err := dec.DecodeBytes() - if err != nil { - return cty.DynamicVal, path.NewError(err) - } - var ty cty.Type - err = (&ty).UnmarshalJSON(typeJSON) - if err != nil { - return cty.DynamicVal, path.NewError(err) - } - - return unmarshal(dec, ty, path) -} diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go deleted file mode 100644 index fc311609..00000000 --- a/vendor/golang.org/x/crypto/bcrypt/base64.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package bcrypt - -import "encoding/base64" - -const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" - -var bcEncoding = base64.NewEncoding(alphabet) - -func base64Encode(src []byte) []byte { - n := bcEncoding.EncodedLen(len(src)) - dst := make([]byte, n) - bcEncoding.Encode(dst, src) - for dst[n-1] == '=' { - n-- - } - return dst[:n] -} - -func base64Decode(src []byte) ([]byte, error) { - numOfEquals := 4 - (len(src) % 4) - for i := 0; i < numOfEquals; i++ { - src = append(src, '=') - } - - dst := make([]byte, bcEncoding.DecodedLen(len(src))) - n, err := bcEncoding.Decode(dst, src) - if err != nil { - return nil, err - } - return dst[:n], nil -} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go deleted file mode 100644 index 5577c0f9..00000000 --- a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing -// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf -package bcrypt // import "golang.org/x/crypto/bcrypt" - -// The code is a port of Provos and Mazières's C implementation. -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "fmt" - "io" - "strconv" - - "golang.org/x/crypto/blowfish" -) - -const ( - MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword - MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword - DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword -) - -// The error returned from CompareHashAndPassword when a password and hash do -// not match. -var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") - -// The error returned from CompareHashAndPassword when a hash is too short to -// be a bcrypt hash. -var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") - -// The error returned from CompareHashAndPassword when a hash was created with -// a bcrypt algorithm newer than this implementation. -type HashVersionTooNewError byte - -func (hv HashVersionTooNewError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) -} - -// The error returned from CompareHashAndPassword when a hash starts with something other than '$' -type InvalidHashPrefixError byte - -func (ih InvalidHashPrefixError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) -} - -type InvalidCostError int - -func (ic InvalidCostError) Error() string { - return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), MinCost, MaxCost) -} - -const ( - majorVersion = '2' - minorVersion = 'a' - maxSaltSize = 16 - maxCryptedHashSize = 23 - encodedSaltSize = 22 - encodedHashSize = 31 - minHashSize = 59 -) - -// magicCipherData is an IV for the 64 Blowfish encryption calls in -// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. 
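
GenerateFromPassword and CompareHashAndPassword, declared further down this file, are the two entry points most callers use. A minimal usage sketch against the canonical golang.org/x/crypto/bcrypt import path; the password literal is illustrative:

package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hash at the default cost (10); each call salts randomly, so the
	// output differs between runs.
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", hash) // e.g. $2a$10$<22-char salt><31-char checksum>

	// CompareHashAndPassword returns nil only on a match.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
		panic(err)
	}
	fmt.Println("password ok")
}
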
-var magicCipherData = []byte{ - 0x4f, 0x72, 0x70, 0x68, - 0x65, 0x61, 0x6e, 0x42, - 0x65, 0x68, 0x6f, 0x6c, - 0x64, 0x65, 0x72, 0x53, - 0x63, 0x72, 0x79, 0x44, - 0x6f, 0x75, 0x62, 0x74, -} - -type hashed struct { - hash []byte - salt []byte - cost int // allowed range is MinCost to MaxCost - major byte - minor byte -} - -// ErrPasswordTooLong is returned when the password passed to -// GenerateFromPassword is too long (i.e. > 72 bytes). -var ErrPasswordTooLong = errors.New("bcrypt: password length exceeds 72 bytes") - -// GenerateFromPassword returns the bcrypt hash of the password at the given -// cost. If the cost given is less than MinCost, the cost will be set to -// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, -// to compare the returned hashed password with its cleartext version. -// GenerateFromPassword does not accept passwords longer than 72 bytes, which -// is the longest password bcrypt will operate on. -func GenerateFromPassword(password []byte, cost int) ([]byte, error) { - if len(password) > 72 { - return nil, ErrPasswordTooLong - } - p, err := newFromPassword(password, cost) - if err != nil { - return nil, err - } - return p.Hash(), nil -} - -// CompareHashAndPassword compares a bcrypt hashed password with its possible -// plaintext equivalent. Returns nil on success, or an error on failure. -func CompareHashAndPassword(hashedPassword, password []byte) error { - p, err := newFromHash(hashedPassword) - if err != nil { - return err - } - - otherHash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return err - } - - otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} - if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { - return nil - } - - return ErrMismatchedHashAndPassword -} - -// Cost returns the hashing cost used to create the given hashed -// password. When, in the future, the hashing cost of a password system needs -// to be increased in order to adjust for greater computational power, this -// function allows one to establish which passwords need to be updated. -func Cost(hashedPassword []byte) (int, error) { - p, err := newFromHash(hashedPassword) - if err != nil { - return 0, err - } - return p.cost, nil -} - -func newFromPassword(password []byte, cost int) (*hashed, error) { - if cost < MinCost { - cost = DefaultCost - } - p := new(hashed) - p.major = majorVersion - p.minor = minorVersion - - err := checkCost(cost) - if err != nil { - return nil, err - } - p.cost = cost - - unencodedSalt := make([]byte, maxSaltSize) - _, err = io.ReadFull(rand.Reader, unencodedSalt) - if err != nil { - return nil, err - } - - p.salt = base64Encode(unencodedSalt) - hash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return nil, err - } - p.hash = hash - return p, err -} - -func newFromHash(hashedSecret []byte) (*hashed, error) { - if len(hashedSecret) < minHashSize { - return nil, ErrHashTooShort - } - p := new(hashed) - n, err := p.decodeVersion(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - n, err = p.decodeCost(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - - // The "+2" is here because we'll have to append at most 2 '=' to the salt - // when base64 decoding it in expensiveBlowfishSetup(). 
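
newFromHash just below, with the decodeVersion and decodeCost helpers, walks the fixed modular-crypt layout: '$', major/minor version, '$', two cost digits, '$', a 22-character salt, then a 31-character checksum. The exported Cost helper exposes the parsed cost field; a short sketch using a widely published example hash rather than a real credential:

package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Layout: $2a$10$ + 22 base64 chars of salt + 31 base64 chars of hash.
	stored := []byte("$2a$10$N9qo8uLOickgx2ZMRZoMyeIjZAgcfl7p92ldGxad68LJZdL17lhWy")

	cost, err := bcrypt.Cost(stored)
	if err != nil {
		panic(err)
	}
	fmt.Println(cost) // 10
}
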
- p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) - copy(p.salt, hashedSecret[:encodedSaltSize]) - - hashedSecret = hashedSecret[encodedSaltSize:] - p.hash = make([]byte, len(hashedSecret)) - copy(p.hash, hashedSecret) - - return p, nil -} - -func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { - cipherData := make([]byte, len(magicCipherData)) - copy(cipherData, magicCipherData) - - c, err := expensiveBlowfishSetup(password, uint32(cost), salt) - if err != nil { - return nil, err - } - - for i := 0; i < 24; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) - } - } - - // Bug compatibility with C bcrypt implementations. We only encode 23 of - // the 24 bytes encrypted. - hsh := base64Encode(cipherData[:maxCryptedHashSize]) - return hsh, nil -} - -func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { - csalt, err := base64Decode(salt) - if err != nil { - return nil, err - } - - // Bug compatibility with C bcrypt implementations. They use the trailing - // NULL in the key string during expansion. - // We copy the key to prevent changing the underlying array. - ckey := append(key[:len(key):len(key)], 0) - - c, err := blowfish.NewSaltedCipher(ckey, csalt) - if err != nil { - return nil, err - } - - var i, rounds uint64 - rounds = 1 << cost - for i = 0; i < rounds; i++ { - blowfish.ExpandKey(ckey, c) - blowfish.ExpandKey(csalt, c) - } - - return c, nil -} - -func (p *hashed) Hash() []byte { - arr := make([]byte, 60) - arr[0] = '$' - arr[1] = p.major - n := 2 - if p.minor != 0 { - arr[2] = p.minor - n = 3 - } - arr[n] = '$' - n++ - copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) - n += 2 - arr[n] = '$' - n++ - copy(arr[n:], p.salt) - n += encodedSaltSize - copy(arr[n:], p.hash) - n += encodedHashSize - return arr[:n] -} - -func (p *hashed) decodeVersion(sbytes []byte) (int, error) { - if sbytes[0] != '$' { - return -1, InvalidHashPrefixError(sbytes[0]) - } - if sbytes[1] > majorVersion { - return -1, HashVersionTooNewError(sbytes[1]) - } - p.major = sbytes[1] - n := 3 - if sbytes[2] != '$' { - p.minor = sbytes[2] - n++ - } - return n, nil -} - -// sbytes should begin where decodeVersion left off. -func (p *hashed) decodeCost(sbytes []byte) (int, error) { - cost, err := strconv.Atoi(string(sbytes[0:2])) - if err != nil { - return -1, err - } - err = checkCost(cost) - if err != nil { - return -1, err - } - p.cost = cost - return 3, nil -} - -func (p *hashed) String() string { - return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) -} - -func checkCost(cost int) error { - if cost < MinCost || cost > MaxCost { - return InvalidCostError(cost) - } - return nil -} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go deleted file mode 100644 index 9d80f195..00000000 --- a/vendor/golang.org/x/crypto/blowfish/block.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blowfish - -// getNextWord returns the next big-endian uint32 value from the byte slice -// at the given position in a circular manner, updating the position. 
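
The circular big-endian read described above is what lets Blowfish accept keys shorter than its 18-word P-array: the key bytes simply repeat. A standalone sketch of the same access pattern (nextWord is a hypothetical stand-in, not the vendored function):

package main

import "fmt"

// nextWord reads 4 bytes big-endian from b starting at *pos,
// wrapping to the start of b when it runs off the end.
func nextWord(b []byte, pos *int) uint32 {
	var w uint32
	for i := 0; i < 4; i++ {
		w = w<<8 | uint32(b[*pos])
		*pos = (*pos + 1) % len(b)
	}
	return w
}

func main() {
	key := []byte{0x01, 0x02, 0x03, 0x04, 0x05}
	pos := 0
	fmt.Printf("%08x\n", nextWord(key, &pos)) // 01020304
	fmt.Printf("%08x\n", nextWord(key, &pos)) // 05010203 (wrapped)
}
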
-func getNextWord(b []byte, pos *int) uint32 { - var w uint32 - j := *pos - for i := 0; i < 4; i++ { - w = w<<8 | uint32(b[j]) - j++ - if j >= len(b) { - j = 0 - } - } - *pos = j - return w -} - -// ExpandKey performs a key expansion on the given *Cipher. Specifically, it -// performs the Blowfish algorithm's key schedule which sets up the *Cipher's -// pi and substitution tables for calls to Encrypt. This is used, primarily, -// by the bcrypt package to reuse the Blowfish key schedule during its -// set up. It's unlikely that you need to use this directly. -func ExpandKey(key []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - // Using inlined getNextWord for performance. - var d uint32 - for k := 0; k < 4; k++ { - d = d<<8 | uint32(key[j]) - j++ - if j >= len(key) { - j = 0 - } - } - c.p[i] ^= d - } - - var l, r uint32 - for i := 0; i < 18; i += 2 { - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -// This is similar to ExpandKey, but folds the salt during the key -// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero -// salt passed in, reusing ExpandKey turns out to be a place of inefficiency -// and specializing it here is useful. -func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - c.p[i] ^= getNextWord(key, &j) - } - - j = 0 - var l, r uint32 - for i := 0; i < 18; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[0] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ 
c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] - xr ^= c.p[17] - return xr, xl -} - -func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[17] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] - xr ^= c.p[0] - return xr, xl -} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go deleted file mode 100644 index 213bf204..00000000 --- a/vendor/golang.org/x/crypto/blowfish/cipher.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. -// -// Blowfish is a legacy cipher and its short block size makes it vulnerable to -// birthday bound attacks (see https://sweet32.info). It should only be used -// where compatibility with legacy systems, not security, is the goal. -// -// Deprecated: any new system should use AES (from crypto/aes, if necessary in -// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from -// golang.org/x/crypto/chacha20poly1305). -package blowfish // import "golang.org/x/crypto/blowfish" - -// The code is a port of Bruce Schneier's C implementation. -// See https://www.schneier.com/blowfish.html. - -import "strconv" - -// The Blowfish block size in bytes. 
-const BlockSize = 8
-
-// A Cipher is an instance of Blowfish encryption using a particular key.
-type Cipher struct {
-	p              [18]uint32
-	s0, s1, s2, s3 [256]uint32
-}
-
-type KeySizeError int
-
-func (k KeySizeError) Error() string {
-	return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
-}
-
-// NewCipher creates and returns a Cipher.
-// The key argument should be the Blowfish key, from 1 to 56 bytes.
-func NewCipher(key []byte) (*Cipher, error) {
-	var result Cipher
-	if k := len(key); k < 1 || k > 56 {
-		return nil, KeySizeError(k)
-	}
-	initCipher(&result)
-	ExpandKey(key, &result)
-	return &result, nil
-}
-
-// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
-// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
-// sufficient and desirable. For bcrypt compatibility, the key can be over 56
-// bytes.
-func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
-	if len(salt) == 0 {
-		return NewCipher(key)
-	}
-	var result Cipher
-	if k := len(key); k < 1 {
-		return nil, KeySizeError(k)
-	}
-	initCipher(&result)
-	expandKeyWithSalt(key, salt, &result)
-	return &result, nil
-}
-
-// BlockSize returns the Blowfish block size, 8 bytes.
-// It is necessary to satisfy the Block interface in the
-// package "crypto/cipher".
-func (c *Cipher) BlockSize() int { return BlockSize }
-
-// Encrypt encrypts the 8-byte buffer src using the key k
-// and stores the result in dst.
-// Note that for amounts of data larger than a block,
-// it is not safe to just call Encrypt on successive blocks;
-// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
-func (c *Cipher) Encrypt(dst, src []byte) {
-	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
-	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
-	l, r = encryptBlock(l, r, c)
-	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
-	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
-}
-
-// Decrypt decrypts the 8-byte buffer src using the key k
-// and stores the result in dst.
-func (c *Cipher) Decrypt(dst, src []byte) {
-	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
-	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
-	l, r = decryptBlock(l, r, c)
-	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
-	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
-}
-
-func initCipher(c *Cipher) {
-	copy(c.p[0:], p[0:])
-	copy(c.s0[0:], s0[0:])
-	copy(c.s1[0:], s1[0:])
-	copy(c.s2[0:], s2[0:])
-	copy(c.s3[0:], s3[0:])
-}
diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go
deleted file mode 100644
index d0407759..00000000
--- a/vendor/golang.org/x/crypto/blowfish/const.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// The startup permutation array and substitution boxes.
-// They are the hexadecimal digits of PI; see:
-// https://www.schneier.com/code/constants.txt.
- -package blowfish - -var s0 = [256]uint32{ - 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, - 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, - 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, - 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, - 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, - 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, - 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, - 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, - 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, - 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, - 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, - 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, - 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, - 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, - 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, - 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, - 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, - 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, - 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, - 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, - 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, - 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, - 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, - 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, - 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, - 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, - 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, - 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, - 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, - 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, - 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, - 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, - 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, - 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, - 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, - 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, - 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, - 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, - 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, - 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, - 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, - 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, - 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, -} - -var s1 = [256]uint32{ - 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, - 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, - 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, - 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, - 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, - 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, - 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, - 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, - 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, - 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, - 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, - 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, - 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, - 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, - 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, - 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, - 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, - 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, - 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, - 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, - 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, - 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, - 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, - 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, - 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, - 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, - 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, - 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, - 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, - 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, - 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, - 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, - 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, - 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, - 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, - 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, - 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, - 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, - 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, - 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, - 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, - 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, - 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, -} - -var s2 = [256]uint32{ - 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, - 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, - 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, - 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, - 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, - 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, - 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, - 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, - 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, - 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, - 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, - 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, - 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, - 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, - 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, - 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, - 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, - 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, - 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, - 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, - 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, - 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, - 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, - 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, - 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, - 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, - 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, - 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, - 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, - 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, - 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, - 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, - 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, - 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, - 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, - 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, - 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, - 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, - 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, - 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, - 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, - 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, - 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, -} - -var s3 = [256]uint32{ - 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, - 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, - 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, - 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, - 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, - 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, - 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, - 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, - 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, - 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, - 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, - 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, - 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, - 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, - 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, - 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, - 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, - 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, - 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, - 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, - 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, - 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, - 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, - 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, - 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, - 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, - 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, - 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, - 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, - 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, - 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, - 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, - 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, - 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, - 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, - 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, - 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, - 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, - 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, - 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, - 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, - 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, - 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, -} - -var p = [18]uint32{ - 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, - 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, - 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, -} diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go deleted file mode 100644 index 425e8eec..00000000 --- a/vendor/golang.org/x/crypto/cast5/cast5.go +++ /dev/null @@ -1,536 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cast5 implements CAST5, as defined in RFC 2144. -// -// CAST5 is a legacy cipher and its short block size makes it vulnerable to -// birthday bound attacks (see https://sweet32.info). It should only be used -// where compatibility with legacy systems, not security, is the goal. -// -// Deprecated: any new system should use AES (from crypto/aes, if necessary in -// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from -// golang.org/x/crypto/chacha20poly1305). 
-package cast5 // import "golang.org/x/crypto/cast5" - -import ( - "errors" - "math/bits" -) - -const BlockSize = 8 -const KeySize = 16 - -type Cipher struct { - masking [16]uint32 - rotate [16]uint8 -} - -func NewCipher(key []byte) (c *Cipher, err error) { - if len(key) != KeySize { - return nil, errors.New("CAST5: keys must be 16 bytes") - } - - c = new(Cipher) - c.keySchedule(key) - return -} - -func (c *Cipher) BlockSize() int { - return BlockSize -} - -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, r = r, l^f1(r, c.masking[15], c.rotate[15]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[15], c.rotate[15]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) - - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -type keyScheduleA [4][7]uint8 -type keyScheduleB [4][5]uint8 - -// keyScheduleRound contains the magic values for a round of the key schedule. -// The keyScheduleA deals with the lines like: -// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] -// Conceptually, both x and z are in the same array, x first. The first -// element describes which word of this array gets written to and the -// second, which word gets read. So, for the line above, it's "4, 0", because -// it's writing to the first word of z, which, being after x, is word 4, and -// reading from the first word of x: word 0. -// -// Next are the indexes into the S-boxes. Now the array is treated as bytes. So -// "xD" is 0xd. 
The first byte of z is written as "16 + 0", just to be clear -// that it's z that we're indexing. -// -// keyScheduleB deals with lines like: -// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] -// "K1" is ignored because key words are always written in order. So the five -// elements are the S-box indexes. They use the same form as in keyScheduleA, -// above. - -type keyScheduleRound struct{} -type keySchedule []keyScheduleRound - -var schedule = []struct { - a keyScheduleA - b keyScheduleB -}{ - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, - {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, - {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, - {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {3, 2, 0xc, 0xd, 8}, - {1, 0, 0xe, 0xf, 0xd}, - {7, 6, 8, 9, 3}, - {5, 4, 0xa, 0xb, 7}, - }, - }, - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, - {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, - {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, - {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {8, 9, 7, 6, 3}, - {0xa, 0xb, 5, 4, 7}, - {0xc, 0xd, 3, 2, 8}, - {0xe, 0xf, 1, 0, 0xd}, - }, - }, -} - -func (c *Cipher) keySchedule(in []byte) { - var t [8]uint32 - var k [32]uint32 - - for i := 0; i < 4; i++ { - j := i * 4 - t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) - } - - x := []byte{6, 7, 4, 5} - ki := 0 - - for half := 0; half < 2; half++ { - for _, round := range schedule { - for j := 0; j < 4; j++ { - var a [7]uint8 - copy(a[:], round.a[j][:]) - w := t[a[1]] - w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] - w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] - w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] - w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] - w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] - t[a[0]] = w - } - - for j := 0; j < 4; j++ { - var b [5]uint8 - copy(b[:], round.b[j][:]) - w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] - w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] - w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] - w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] - w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] - k[ki] = w - ki++ - } - } - } - - for i := 0; i < 16; i++ { - c.masking[i] = k[i] - c.rotate[i] = uint8(k[16+i] & 0x1f) - } -} - -// These are the three 'f' functions. See RFC 2144, section 2.2. 
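
The three 'f' functions introduced above implement the RFC 2144 round operations that drive the sixteen Feistel rounds in Encrypt and Decrypt earlier in this file. A single-block usage sketch against the canonical golang.org/x/crypto/cast5 import path; key and plaintext bytes are arbitrary:

package main

import (
	"fmt"

	"golang.org/x/crypto/cast5"
)

func main() {
	key := make([]byte, cast5.KeySize) // 16 bytes; all-zero for the sketch
	c, err := cast5.NewCipher(key)
	if err != nil {
		panic(err)
	}

	src := []byte("8bytes!!") // exactly one cast5.BlockSize block
	dst := make([]byte, cast5.BlockSize)
	c.Encrypt(dst, src)

	back := make([]byte, cast5.BlockSize)
	c.Decrypt(back, dst)
	fmt.Printf("%s\n", back) // 8bytes!!
}
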
-func f1(d, m uint32, r uint8) uint32 { - t := m + d - I := bits.RotateLeft32(t, int(r)) - return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] -} - -func f2(d, m uint32, r uint8) uint32 { - t := m ^ d - I := bits.RotateLeft32(t, int(r)) - return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] -} - -func f3(d, m uint32, r uint8) uint32 { - t := m - d - I := bits.RotateLeft32(t, int(r)) - return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] -} - -var sBox = [8][256]uint32{ - { - 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, - 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, - 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, - 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, - 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, - 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, - 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, - 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, - 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, - 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, - 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, - 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, - 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, - 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, - 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, - 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, - 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, - 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, - 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, - 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, - 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, - 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, - 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, - 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, - 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, - 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, - 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, - 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, - 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, - 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, - 0xbd91e046, 
0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, - 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, - }, - { - 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, - 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, - 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, - 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, - 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, - 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, - 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, - 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, - 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, - 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, - 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, - 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, - 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, - 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, - 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, - 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, - 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, - 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, - 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, - 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, - 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, - 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, - 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, - 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, - 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, - 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, - 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, - 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, - 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, - 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, - 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, - 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, - }, - { - 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, - 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, - 0x11107d9f, 
0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, - 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, - 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, - 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, - 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, - 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, - 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, - 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, - 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, - 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, - 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, - 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, - 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, - 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, - 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, - 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, - 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, - 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, - 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, - 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, - 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, - 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, - 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, - 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, - 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, - 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, - 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, - 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, - 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, - 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, - }, - { - 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, - 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, - 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, - 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, - 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, - 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, - 0x2649abdf, 0xaea0c7f5, 
0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, - 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, - 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, - 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, - 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, - 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, - 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, - 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, - 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, - 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, - 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, - 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, - 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, - 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, - 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, - 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, - 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, - 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, - 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, - 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, - 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, - 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, - 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, - 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, - 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, - 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, - }, - { - 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, - 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, - 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, - 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, - 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, - 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, - 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, - 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, - 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, - 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, - 0x911e739a, 0x17af8975, 0x32c7911c, 
0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, - 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, - 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, - 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, - 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, - 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, - 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, - 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, - 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, - 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, - 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, - 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, - 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, - 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, - 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, - 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, - 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, - 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, - 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, - 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, - 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, - 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, - }, - { - 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, - 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, - 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, - 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, - 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, - 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, - 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, - 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, - 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, - 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, - 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, - 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, - 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, - 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, - 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 
0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f, - 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, - 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, - 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, - 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, - 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, - 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, - 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, - 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, - 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, - 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, - 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, - 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, - 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, - 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, - 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, - 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, - 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, - }, - { - 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, - 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, - 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, - 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, - 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, - 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, - 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, - 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, - 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, - 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, - 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, - 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, - 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, - 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, - 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, - 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, - 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, - 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, - 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 
0x4eeb8476, 0x488dcf25, 0x36c9d566, - 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, - 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, - 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, - 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, - 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, - 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, - 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, - 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, - 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, - 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, - 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, - 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, - 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, - }, - { - 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, - 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, - 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, - 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, - 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, - 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, - 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, - 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, - 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, - 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, - 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, - 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, - 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, - 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, - 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, - 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, - 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, - 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, - 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, - 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, - 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, - 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, - 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 
0xb48a2465, 0x2eda7fa4, - 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, - 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, - 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, - 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, - 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, - 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, - 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, - 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, - 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, - }, -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go deleted file mode 100644 index 94c71ac1..00000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 && gc && !purego -// +build go1.11,gc,!purego - -package chacha20 - -const bufSize = 256 - -//go:noescape -func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) - -func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { - xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s deleted file mode 100644 index 63cae9e6..00000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-
-//go:build go1.11 && gc && !purego
-// +build go1.11,gc,!purego
-
-#include "textflag.h"
-
-#define NUM_ROUNDS 10
-
-// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
-TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
-	MOVD dst+0(FP), R1
-	MOVD src+24(FP), R2
-	MOVD src_len+32(FP), R3
-	MOVD key+48(FP), R4
-	MOVD nonce+56(FP), R6
-	MOVD counter+64(FP), R7
-
-	MOVD $·constants(SB), R10
-	MOVD $·incRotMatrix(SB), R11
-
-	MOVW (R7), R20
-
-	AND $~255, R3, R13
-	ADD R2, R13, R12 // R12 for block end
-	AND $255, R3, R13
-loop:
-	MOVD $NUM_ROUNDS, R21
-	VLD1 (R11), [V30.S4, V31.S4]
-
-	// load constants
-	// VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4]
-	WORD $0x4D60E940
-
-	// load keys
-	// VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4]
-	WORD $0x4DFFE884
-	// VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4]
-	WORD $0x4DFFE888
-	SUB $32, R4
-
-	// load counter + nonce
-	// VLD1R (R7), [V12.S4]
-	WORD $0x4D40C8EC
-
-	// VLD3R (R6), [V13.S4, V14.S4, V15.S4]
-	WORD $0x4D40E8CD
-
-	// update counter
-	VADD V30.S4, V12.S4, V12.S4
-
-chacha:
-	// V0..V3 += V4..V7
-	// V12..V15 <<<= ((V12..V15 XOR V0..V3), 16)
-	VADD V0.S4, V4.S4, V0.S4
-	VADD V1.S4, V5.S4, V1.S4
-	VADD V2.S4, V6.S4, V2.S4
-	VADD V3.S4, V7.S4, V3.S4
-	VEOR V12.B16, V0.B16, V12.B16
-	VEOR V13.B16, V1.B16, V13.B16
-	VEOR V14.B16, V2.B16, V14.B16
-	VEOR V15.B16, V3.B16, V15.B16
-	VREV32 V12.H8, V12.H8
-	VREV32 V13.H8, V13.H8
-	VREV32 V14.H8, V14.H8
-	VREV32 V15.H8, V15.H8
-	// V8..V11 += V12..V15
-	// V4..V7 <<<= ((V4..V7 XOR V8..V11), 12)
-	VADD V8.S4, V12.S4, V8.S4
-	VADD V9.S4, V13.S4, V9.S4
-	VADD V10.S4, V14.S4, V10.S4
-	VADD V11.S4, V15.S4, V11.S4
-	VEOR V8.B16, V4.B16, V16.B16
-	VEOR V9.B16, V5.B16, V17.B16
-	VEOR V10.B16, V6.B16, V18.B16
-	VEOR V11.B16, V7.B16, V19.B16
-	VSHL $12, V16.S4, V4.S4
-	VSHL $12, V17.S4, V5.S4
-	VSHL $12, V18.S4, V6.S4
-	VSHL $12, V19.S4, V7.S4
-	VSRI $20, V16.S4, V4.S4
-	VSRI $20, V17.S4, V5.S4
-	VSRI $20, V18.S4, V6.S4
-	VSRI $20, V19.S4, V7.S4
-
-	// V0..V3 += V4..V7
-	// V12..V15 <<<= ((V12..V15 XOR V0..V3), 8)
-	VADD V0.S4, V4.S4, V0.S4
-	VADD V1.S4, V5.S4, V1.S4
-	VADD V2.S4, V6.S4, V2.S4
-	VADD V3.S4, V7.S4, V3.S4
-	VEOR V12.B16, V0.B16, V12.B16
-	VEOR V13.B16, V1.B16, V13.B16
-	VEOR V14.B16, V2.B16, V14.B16
-	VEOR V15.B16, V3.B16, V15.B16
-	VTBL V31.B16, [V12.B16], V12.B16
-	VTBL V31.B16, [V13.B16], V13.B16
-	VTBL V31.B16, [V14.B16], V14.B16
-	VTBL V31.B16, [V15.B16], V15.B16
-
-	// V8..V11 += V12..V15
-	// V4..V7 <<<= ((V4..V7 XOR V8..V11), 7)
-	VADD V12.S4, V8.S4, V8.S4
-	VADD V13.S4, V9.S4, V9.S4
-	VADD V14.S4, V10.S4, V10.S4
-	VADD V15.S4, V11.S4, V11.S4
-	VEOR V8.B16, V4.B16, V16.B16
-	VEOR V9.B16, V5.B16, V17.B16
-	VEOR V10.B16, V6.B16, V18.B16
-	VEOR V11.B16, V7.B16, V19.B16
-	VSHL $7, V16.S4, V4.S4
-	VSHL $7, V17.S4, V5.S4
-	VSHL $7, V18.S4, V6.S4
-	VSHL $7, V19.S4, V7.S4
-	VSRI $25, V16.S4, V4.S4
-	VSRI $25, V17.S4, V5.S4
-	VSRI $25, V18.S4, V6.S4
-	VSRI $25, V19.S4, V7.S4
-
-	// V0..V3 += V5..V7, V4
-	// V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16)
-	VADD V0.S4, V5.S4, V0.S4
-	VADD V1.S4, V6.S4, V1.S4
-	VADD V2.S4, V7.S4, V2.S4
-	VADD V3.S4, V4.S4, V3.S4
-	VEOR V15.B16, V0.B16, V15.B16
-	VEOR V12.B16, V1.B16, V12.B16
-	VEOR V13.B16, V2.B16, V13.B16
-	VEOR V14.B16, V3.B16, V14.B16
-	VREV32 V12.H8, V12.H8
-	VREV32 V13.H8, V13.H8
-	VREV32 V14.H8, V14.H8
-	VREV32 V15.H8, V15.H8
-
-	// V10 += V15; V5 <<<= ((V10 XOR V5), 12)
-	// ...
- VADD V15.S4, V10.S4, V10.S4 - VADD V12.S4, V11.S4, V11.S4 - VADD V13.S4, V8.S4, V8.S4 - VADD V14.S4, V9.S4, V9.S4 - VEOR V10.B16, V5.B16, V16.B16 - VEOR V11.B16, V6.B16, V17.B16 - VEOR V8.B16, V7.B16, V18.B16 - VEOR V9.B16, V4.B16, V19.B16 - VSHL $12, V16.S4, V5.S4 - VSHL $12, V17.S4, V6.S4 - VSHL $12, V18.S4, V7.S4 - VSHL $12, V19.S4, V4.S4 - VSRI $20, V16.S4, V5.S4 - VSRI $20, V17.S4, V6.S4 - VSRI $20, V18.S4, V7.S4 - VSRI $20, V19.S4, V4.S4 - - // V0 += V5; V15 <<<= ((V0 XOR V15), 8) - // ... - VADD V5.S4, V0.S4, V0.S4 - VADD V6.S4, V1.S4, V1.S4 - VADD V7.S4, V2.S4, V2.S4 - VADD V4.S4, V3.S4, V3.S4 - VEOR V0.B16, V15.B16, V15.B16 - VEOR V1.B16, V12.B16, V12.B16 - VEOR V2.B16, V13.B16, V13.B16 - VEOR V3.B16, V14.B16, V14.B16 - VTBL V31.B16, [V12.B16], V12.B16 - VTBL V31.B16, [V13.B16], V13.B16 - VTBL V31.B16, [V14.B16], V14.B16 - VTBL V31.B16, [V15.B16], V15.B16 - - // V10 += V15; V5 <<<= ((V10 XOR V5), 7) - // ... - VADD V15.S4, V10.S4, V10.S4 - VADD V12.S4, V11.S4, V11.S4 - VADD V13.S4, V8.S4, V8.S4 - VADD V14.S4, V9.S4, V9.S4 - VEOR V10.B16, V5.B16, V16.B16 - VEOR V11.B16, V6.B16, V17.B16 - VEOR V8.B16, V7.B16, V18.B16 - VEOR V9.B16, V4.B16, V19.B16 - VSHL $7, V16.S4, V5.S4 - VSHL $7, V17.S4, V6.S4 - VSHL $7, V18.S4, V7.S4 - VSHL $7, V19.S4, V4.S4 - VSRI $25, V16.S4, V5.S4 - VSRI $25, V17.S4, V6.S4 - VSRI $25, V18.S4, V7.S4 - VSRI $25, V19.S4, V4.S4 - - SUB $1, R21 - CBNZ R21, chacha - - // VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4] - WORD $0x4D60E950 - - // VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4] - WORD $0x4DFFE894 - VADD V30.S4, V12.S4, V12.S4 - VADD V16.S4, V0.S4, V0.S4 - VADD V17.S4, V1.S4, V1.S4 - VADD V18.S4, V2.S4, V2.S4 - VADD V19.S4, V3.S4, V3.S4 - // VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4] - WORD $0x4DFFE898 - // restore R4 - SUB $32, R4 - - // load counter + nonce - // VLD1R (R7), [V28.S4] - WORD $0x4D40C8FC - // VLD3R (R6), [V29.S4, V30.S4, V31.S4] - WORD $0x4D40E8DD - - VADD V20.S4, V4.S4, V4.S4 - VADD V21.S4, V5.S4, V5.S4 - VADD V22.S4, V6.S4, V6.S4 - VADD V23.S4, V7.S4, V7.S4 - VADD V24.S4, V8.S4, V8.S4 - VADD V25.S4, V9.S4, V9.S4 - VADD V26.S4, V10.S4, V10.S4 - VADD V27.S4, V11.S4, V11.S4 - VADD V28.S4, V12.S4, V12.S4 - VADD V29.S4, V13.S4, V13.S4 - VADD V30.S4, V14.S4, V14.S4 - VADD V31.S4, V15.S4, V15.S4 - - VZIP1 V1.S4, V0.S4, V16.S4 - VZIP2 V1.S4, V0.S4, V17.S4 - VZIP1 V3.S4, V2.S4, V18.S4 - VZIP2 V3.S4, V2.S4, V19.S4 - VZIP1 V5.S4, V4.S4, V20.S4 - VZIP2 V5.S4, V4.S4, V21.S4 - VZIP1 V7.S4, V6.S4, V22.S4 - VZIP2 V7.S4, V6.S4, V23.S4 - VZIP1 V9.S4, V8.S4, V24.S4 - VZIP2 V9.S4, V8.S4, V25.S4 - VZIP1 V11.S4, V10.S4, V26.S4 - VZIP2 V11.S4, V10.S4, V27.S4 - VZIP1 V13.S4, V12.S4, V28.S4 - VZIP2 V13.S4, V12.S4, V29.S4 - VZIP1 V15.S4, V14.S4, V30.S4 - VZIP2 V15.S4, V14.S4, V31.S4 - VZIP1 V18.D2, V16.D2, V0.D2 - VZIP2 V18.D2, V16.D2, V4.D2 - VZIP1 V19.D2, V17.D2, V8.D2 - VZIP2 V19.D2, V17.D2, V12.D2 - VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16] - - VZIP1 V22.D2, V20.D2, V1.D2 - VZIP2 V22.D2, V20.D2, V5.D2 - VZIP1 V23.D2, V21.D2, V9.D2 - VZIP2 V23.D2, V21.D2, V13.D2 - VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16] - VZIP1 V26.D2, V24.D2, V2.D2 - VZIP2 V26.D2, V24.D2, V6.D2 - VZIP1 V27.D2, V25.D2, V10.D2 - VZIP2 V27.D2, V25.D2, V14.D2 - VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16] - VZIP1 V30.D2, V28.D2, V3.D2 - VZIP2 V30.D2, V28.D2, V7.D2 - VZIP1 V31.D2, V29.D2, V11.D2 - VZIP2 V31.D2, V29.D2, V15.D2 - VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16] - VEOR V0.B16, V16.B16, V16.B16 - VEOR V1.B16, V17.B16, V17.B16 - VEOR V2.B16, 
V18.B16, V18.B16 - VEOR V3.B16, V19.B16, V19.B16 - VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1) - VEOR V4.B16, V20.B16, V20.B16 - VEOR V5.B16, V21.B16, V21.B16 - VEOR V6.B16, V22.B16, V22.B16 - VEOR V7.B16, V23.B16, V23.B16 - VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1) - VEOR V8.B16, V24.B16, V24.B16 - VEOR V9.B16, V25.B16, V25.B16 - VEOR V10.B16, V26.B16, V26.B16 - VEOR V11.B16, V27.B16, V27.B16 - VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1) - VEOR V12.B16, V28.B16, V28.B16 - VEOR V13.B16, V29.B16, V29.B16 - VEOR V14.B16, V30.B16, V30.B16 - VEOR V15.B16, V31.B16, V31.B16 - VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1) - - ADD $4, R20 - MOVW R20, (R7) // update counter - - CMP R2, R12 - BGT loop - - RET - - -DATA ·constants+0x00(SB)/4, $0x61707865 -DATA ·constants+0x04(SB)/4, $0x3320646e -DATA ·constants+0x08(SB)/4, $0x79622d32 -DATA ·constants+0x0c(SB)/4, $0x6b206574 -GLOBL ·constants(SB), NOPTR|RODATA, $32 - -DATA ·incRotMatrix+0x00(SB)/4, $0x00000000 -DATA ·incRotMatrix+0x04(SB)/4, $0x00000001 -DATA ·incRotMatrix+0x08(SB)/4, $0x00000002 -DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003 -DATA ·incRotMatrix+0x10(SB)/4, $0x02010003 -DATA ·incRotMatrix+0x14(SB)/4, $0x06050407 -DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B -DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F -GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go deleted file mode 100644 index 93eb5ae6..00000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go +++ /dev/null @@ -1,398 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package chacha20 implements the ChaCha20 and XChaCha20 encryption algorithms -// as specified in RFC 8439 and draft-irtf-cfrg-xchacha-01. -package chacha20 - -import ( - "crypto/cipher" - "encoding/binary" - "errors" - "math/bits" - - "golang.org/x/crypto/internal/alias" -) - -const ( - // KeySize is the size of the key used by this cipher, in bytes. - KeySize = 32 - - // NonceSize is the size of the nonce used with the standard variant of this - // cipher, in bytes. - // - // Note that this is too short to be safely generated at random if the same - // key is reused more than 2³² times. - NonceSize = 12 - - // NonceSizeX is the size of the nonce used with the XChaCha20 variant of - // this cipher, in bytes. - NonceSizeX = 24 -) - -// Cipher is a stateful instance of ChaCha20 or XChaCha20 using a particular key -// and nonce. A *Cipher implements the cipher.Stream interface. -type Cipher struct { - // The ChaCha20 state is 16 words: 4 constant, 8 of key, 1 of counter - // (incremented after each block), and 3 of nonce. - key [8]uint32 - counter uint32 - nonce [3]uint32 - - // The last len bytes of buf are leftover key stream bytes from the previous - // XORKeyStream invocation. The size of buf depends on how many blocks are - // computed at a time by xorKeyStreamBlocks. - buf [bufSize]byte - len int - - // overflow is set when the counter overflowed, no more blocks can be - // generated, and the next XORKeyStream call should panic. - overflow bool - - // The counter-independent results of the first round are cached after they - // are computed the first time. 
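	// (Concretely: the counter occupies state word 12, and the first round
	// applies quarterRound to the four columns (0,4,8,12), (1,5,9,13),
	// (2,6,10,14) and (3,7,11,15); only the first column touches the
	// counter, so the other three yield the p1..p15 words cached below.
	// See xorKeyStreamBlocksGeneric.)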
- precompDone bool - p1, p5, p9, p13 uint32 - p2, p6, p10, p14 uint32 - p3, p7, p11, p15 uint32 -} - -var _ cipher.Stream = (*Cipher)(nil) - -// NewUnauthenticatedCipher creates a new ChaCha20 stream cipher with the given -// 32 bytes key and a 12 or 24 bytes nonce. If a nonce of 24 bytes is provided, -// the XChaCha20 construction will be used. It returns an error if key or nonce -// have any other length. -// -// Note that ChaCha20, like all stream ciphers, is not authenticated and allows -// attackers to silently tamper with the plaintext. For this reason, it is more -// appropriate as a building block than as a standalone encryption mechanism. -// Instead, consider using package golang.org/x/crypto/chacha20poly1305. -func NewUnauthenticatedCipher(key, nonce []byte) (*Cipher, error) { - // This function is split into a wrapper so that the Cipher allocation will - // be inlined, and depending on how the caller uses the return value, won't - // escape to the heap. - c := &Cipher{} - return newUnauthenticatedCipher(c, key, nonce) -} - -func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) { - if len(key) != KeySize { - return nil, errors.New("chacha20: wrong key size") - } - if len(nonce) == NonceSizeX { - // XChaCha20 uses the ChaCha20 core to mix 16 bytes of the nonce into a - // derived key, allowing it to operate on a nonce of 24 bytes. See - // draft-irtf-cfrg-xchacha-01, Section 2.3. - key, _ = HChaCha20(key, nonce[0:16]) - cNonce := make([]byte, NonceSize) - copy(cNonce[4:12], nonce[16:24]) - nonce = cNonce - } else if len(nonce) != NonceSize { - return nil, errors.New("chacha20: wrong nonce size") - } - - key, nonce = key[:KeySize], nonce[:NonceSize] // bounds check elimination hint - c.key = [8]uint32{ - binary.LittleEndian.Uint32(key[0:4]), - binary.LittleEndian.Uint32(key[4:8]), - binary.LittleEndian.Uint32(key[8:12]), - binary.LittleEndian.Uint32(key[12:16]), - binary.LittleEndian.Uint32(key[16:20]), - binary.LittleEndian.Uint32(key[20:24]), - binary.LittleEndian.Uint32(key[24:28]), - binary.LittleEndian.Uint32(key[28:32]), - } - c.nonce = [3]uint32{ - binary.LittleEndian.Uint32(nonce[0:4]), - binary.LittleEndian.Uint32(nonce[4:8]), - binary.LittleEndian.Uint32(nonce[8:12]), - } - return c, nil -} - -// The constant first 4 words of the ChaCha20 state. -const ( - j0 uint32 = 0x61707865 // expa - j1 uint32 = 0x3320646e // nd 3 - j2 uint32 = 0x79622d32 // 2-by - j3 uint32 = 0x6b206574 // te k -) - -const blockSize = 64 - -// quarterRound is the core of ChaCha20. It shuffles the bits of 4 state words. -// It's executed 4 times for each of the 20 ChaCha20 rounds, operating on all 16 -// words each round, in columnar or diagonal groups of 4 at a time. -func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) { - a += b - d ^= a - d = bits.RotateLeft32(d, 16) - c += d - b ^= c - b = bits.RotateLeft32(b, 12) - a += b - d ^= a - d = bits.RotateLeft32(d, 8) - c += d - b ^= c - b = bits.RotateLeft32(b, 7) - return a, b, c, d -} - -// SetCounter sets the Cipher counter. The next invocation of XORKeyStream will -// behave as if (64 * counter) bytes had been encrypted so far. -// -// To prevent accidental counter reuse, SetCounter panics if counter is less -// than the current value. -// -// Note that the execution time of XORKeyStream is not independent of the -// counter value. -func (s *Cipher) SetCounter(counter uint32) { - // Internally, s may buffer multiple blocks, which complicates this - // implementation slightly. 
When checking whether the counter has rolled - // back, we must use both s.counter and s.len to determine how many blocks - // we have already output. - outputCounter := s.counter - uint32(s.len)/blockSize - if s.overflow || counter < outputCounter { - panic("chacha20: SetCounter attempted to rollback counter") - } - - // In the general case, we set the new counter value and reset s.len to 0, - // causing the next call to XORKeyStream to refill the buffer. However, if - // we're advancing within the existing buffer, we can save work by simply - // setting s.len. - if counter < s.counter { - s.len = int(s.counter-counter) * blockSize - } else { - s.counter = counter - s.len = 0 - } -} - -// XORKeyStream XORs each byte in the given slice with a byte from the -// cipher's key stream. Dst and src must overlap entirely or not at all. -// -// If len(dst) < len(src), XORKeyStream will panic. It is acceptable -// to pass a dst bigger than src, and in that case, XORKeyStream will -// only update dst[:len(src)] and will not touch the rest of dst. -// -// Multiple calls to XORKeyStream behave as if the concatenation of -// the src buffers was passed in a single run. That is, Cipher -// maintains state and does not reset at each XORKeyStream call. -func (s *Cipher) XORKeyStream(dst, src []byte) { - if len(src) == 0 { - return - } - if len(dst) < len(src) { - panic("chacha20: output smaller than input") - } - dst = dst[:len(src)] - if alias.InexactOverlap(dst, src) { - panic("chacha20: invalid buffer overlap") - } - - // First, drain any remaining key stream from a previous XORKeyStream. - if s.len != 0 { - keyStream := s.buf[bufSize-s.len:] - if len(src) < len(keyStream) { - keyStream = keyStream[:len(src)] - } - _ = src[len(keyStream)-1] // bounds check elimination hint - for i, b := range keyStream { - dst[i] = src[i] ^ b - } - s.len -= len(keyStream) - dst, src = dst[len(keyStream):], src[len(keyStream):] - } - if len(src) == 0 { - return - } - - // If we'd need to let the counter overflow and keep generating output, - // panic immediately. If instead we'd only reach the last block, remember - // not to generate any more output after the buffer is drained. - numBlocks := (uint64(len(src)) + blockSize - 1) / blockSize - if s.overflow || uint64(s.counter)+numBlocks > 1<<32 { - panic("chacha20: counter overflow") - } else if uint64(s.counter)+numBlocks == 1<<32 { - s.overflow = true - } - - // xorKeyStreamBlocks implementations expect input lengths that are a - // multiple of bufSize. Platform-specific ones process multiple blocks at a - // time, so have bufSizes that are a multiple of blockSize. - - full := len(src) - len(src)%bufSize - if full > 0 { - s.xorKeyStreamBlocks(dst[:full], src[:full]) - } - dst, src = dst[full:], src[full:] - - // If using a multi-block xorKeyStreamBlocks would overflow, use the generic - // one that does one block at a time. - const blocksPerBuf = bufSize / blockSize - if uint64(s.counter)+blocksPerBuf > 1<<32 { - s.buf = [bufSize]byte{} - numBlocks := (len(src) + blockSize - 1) / blockSize - buf := s.buf[bufSize-numBlocks*blockSize:] - copy(buf, src) - s.xorKeyStreamBlocksGeneric(buf, buf) - s.len = len(buf) - copy(dst, buf) - return - } - - // If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and - // keep the leftover keystream for the next XORKeyStream invocation. 
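	// As a concrete example, with the 256-byte bufSize used by the SIMD
	// backends: a 100-byte tail is copied into the zeroed buffer, the whole
	// buffer is encrypted by one xorKeyStreamBlocks call (advancing the
	// counter by four blocks), the first 100 bytes are copied out to dst,
	// and s.len becomes 156. The trailing 156 bytes are zeros XORed with
	// key stream, i.e. raw key stream, ready for the next invocation.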
- if len(src) > 0 { - s.buf = [bufSize]byte{} - copy(s.buf[:], src) - s.xorKeyStreamBlocks(s.buf[:], s.buf[:]) - s.len = bufSize - copy(dst, s.buf[:]) - } -} - -func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) { - if len(dst) != len(src) || len(dst)%blockSize != 0 { - panic("chacha20: internal error: wrong dst and/or src length") - } - - // To generate each block of key stream, the initial cipher state - // (represented below) is passed through 20 rounds of shuffling, - // alternatively applying quarterRounds by columns (like 1, 5, 9, 13) - // or by diagonals (like 1, 6, 11, 12). - // - // 0:cccccccc 1:cccccccc 2:cccccccc 3:cccccccc - // 4:kkkkkkkk 5:kkkkkkkk 6:kkkkkkkk 7:kkkkkkkk - // 8:kkkkkkkk 9:kkkkkkkk 10:kkkkkkkk 11:kkkkkkkk - // 12:bbbbbbbb 13:nnnnnnnn 14:nnnnnnnn 15:nnnnnnnn - // - // c=constant k=key b=blockcount n=nonce - var ( - c0, c1, c2, c3 = j0, j1, j2, j3 - c4, c5, c6, c7 = s.key[0], s.key[1], s.key[2], s.key[3] - c8, c9, c10, c11 = s.key[4], s.key[5], s.key[6], s.key[7] - _, c13, c14, c15 = s.counter, s.nonce[0], s.nonce[1], s.nonce[2] - ) - - // Three quarters of the first round don't depend on the counter, so we can - // calculate them here, and reuse them for multiple blocks in the loop, and - // for future XORKeyStream invocations. - if !s.precompDone { - s.p1, s.p5, s.p9, s.p13 = quarterRound(c1, c5, c9, c13) - s.p2, s.p6, s.p10, s.p14 = quarterRound(c2, c6, c10, c14) - s.p3, s.p7, s.p11, s.p15 = quarterRound(c3, c7, c11, c15) - s.precompDone = true - } - - // A condition of len(src) > 0 would be sufficient, but this also - // acts as a bounds check elimination hint. - for len(src) >= 64 && len(dst) >= 64 { - // The remainder of the first column round. - fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter) - - // The second diagonal round. - x0, x5, x10, x15 := quarterRound(fcr0, s.p5, s.p10, s.p15) - x1, x6, x11, x12 := quarterRound(s.p1, s.p6, s.p11, fcr12) - x2, x7, x8, x13 := quarterRound(s.p2, s.p7, fcr8, s.p13) - x3, x4, x9, x14 := quarterRound(s.p3, fcr4, s.p9, s.p14) - - // The remaining 18 rounds. - for i := 0; i < 9; i++ { - // Column round. - x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) - x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) - x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) - x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) - - // Diagonal round. - x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) - x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) - x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) - x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) - } - - // Add back the initial state to generate the key stream, then - // XOR the key stream with the source and write out the result. - addXor(dst[0:4], src[0:4], x0, c0) - addXor(dst[4:8], src[4:8], x1, c1) - addXor(dst[8:12], src[8:12], x2, c2) - addXor(dst[12:16], src[12:16], x3, c3) - addXor(dst[16:20], src[16:20], x4, c4) - addXor(dst[20:24], src[20:24], x5, c5) - addXor(dst[24:28], src[24:28], x6, c6) - addXor(dst[28:32], src[28:32], x7, c7) - addXor(dst[32:36], src[32:36], x8, c8) - addXor(dst[36:40], src[36:40], x9, c9) - addXor(dst[40:44], src[40:44], x10, c10) - addXor(dst[44:48], src[44:48], x11, c11) - addXor(dst[48:52], src[48:52], x12, s.counter) - addXor(dst[52:56], src[52:56], x13, c13) - addXor(dst[56:60], src[56:60], x14, c14) - addXor(dst[60:64], src[60:64], x15, c15) - - s.counter += 1 - - src, dst = src[blockSize:], dst[blockSize:] - } -} - -// HChaCha20 uses the ChaCha20 core to generate a derived key from a 32 bytes -// key and a 16 bytes nonce. 
It returns an error if key or nonce have any other
-// length. It is used as part of the XChaCha20 construction.
-func HChaCha20(key, nonce []byte) ([]byte, error) {
-	// This function is split into a wrapper so that the slice allocation will
-	// be inlined, and depending on how the caller uses the return value, won't
-	// escape to the heap.
-	out := make([]byte, 32)
-	return hChaCha20(out, key, nonce)
-}
-
-func hChaCha20(out, key, nonce []byte) ([]byte, error) {
-	if len(key) != KeySize {
-		return nil, errors.New("chacha20: wrong HChaCha20 key size")
-	}
-	if len(nonce) != 16 {
-		return nil, errors.New("chacha20: wrong HChaCha20 nonce size")
-	}
-
-	x0, x1, x2, x3 := j0, j1, j2, j3
-	x4 := binary.LittleEndian.Uint32(key[0:4])
-	x5 := binary.LittleEndian.Uint32(key[4:8])
-	x6 := binary.LittleEndian.Uint32(key[8:12])
-	x7 := binary.LittleEndian.Uint32(key[12:16])
-	x8 := binary.LittleEndian.Uint32(key[16:20])
-	x9 := binary.LittleEndian.Uint32(key[20:24])
-	x10 := binary.LittleEndian.Uint32(key[24:28])
-	x11 := binary.LittleEndian.Uint32(key[28:32])
-	x12 := binary.LittleEndian.Uint32(nonce[0:4])
-	x13 := binary.LittleEndian.Uint32(nonce[4:8])
-	x14 := binary.LittleEndian.Uint32(nonce[8:12])
-	x15 := binary.LittleEndian.Uint32(nonce[12:16])
-
-	for i := 0; i < 10; i++ {
-		// Column round.
-		x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
-		x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
-		x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
-		x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
-
-		// Diagonal round.
-		x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
-		x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
-		x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
-		x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
-	}
-
-	_ = out[31] // bounds check elimination hint
-	binary.LittleEndian.PutUint32(out[0:4], x0)
-	binary.LittleEndian.PutUint32(out[4:8], x1)
-	binary.LittleEndian.PutUint32(out[8:12], x2)
-	binary.LittleEndian.PutUint32(out[12:16], x3)
-	binary.LittleEndian.PutUint32(out[16:20], x12)
-	binary.LittleEndian.PutUint32(out[20:24], x13)
-	binary.LittleEndian.PutUint32(out[24:28], x14)
-	binary.LittleEndian.PutUint32(out[28:32], x15)
-	return out, nil
-}
diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
deleted file mode 100644
index 025b4989..00000000
--- a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (!arm64 && !s390x && !ppc64le) || (arm64 && !go1.11) || !gc || purego
-// +build !arm64,!s390x,!ppc64le arm64,!go1.11 !gc purego
-
-package chacha20
-
-const bufSize = blockSize
-
-func (s *Cipher) xorKeyStreamBlocks(dst, src []byte) {
-	s.xorKeyStreamBlocksGeneric(dst, src)
-}
diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go
deleted file mode 100644
index da420b2e..00000000
--- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
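
Before the ppc64le and s390x backends that follow, a sketch of driving the portable API deleted above may help reviewers. This assumes the upstream import path golang.org/x/crypto/chacha20; the 24-byte nonce selects the XChaCha20 construction via the HChaCha20 derivation shown earlier:

	package main

	import (
		"crypto/rand"
		"fmt"
		"log"

		"golang.org/x/crypto/chacha20"
	)

	func main() {
		key := make([]byte, chacha20.KeySize)
		nonce := make([]byte, chacha20.NonceSizeX) // 24 bytes => XChaCha20
		if _, err := rand.Read(key); err != nil {
			log.Fatal(err)
		}
		if _, err := rand.Read(nonce); err != nil {
			log.Fatal(err)
		}

		c, err := chacha20.NewUnauthenticatedCipher(key, nonce)
		if err != nil {
			log.Fatal(err)
		}

		msg := []byte("stream ciphers XOR key stream into the plaintext")
		ct := make([]byte, len(msg))
		c.XORKeyStream(ct, msg)
		fmt.Printf("%x\n", ct)
	}

As the package's own doc comment stresses, this is unauthenticated encryption; for real use the recommended building block is golang.org/x/crypto/chacha20poly1305.
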
- -//go:build gc && !purego -// +build gc,!purego - -package chacha20 - -const bufSize = 256 - -//go:noescape -func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) - -func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { - chaCha20_ctr32_vsx(&dst[0], &src[0], len(src), &c.key, &c.counter) -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s deleted file mode 100644 index 5c0fed26..00000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on CRYPTOGAMS code with the following comment: -// # ==================================================================== -// # Written by Andy Polyakov for the OpenSSL -// # project. The module is, however, dual licensed under OpenSSL and -// # CRYPTOGAMS licenses depending on where you obtain it. For further -// # details see http://www.openssl.org/~appro/cryptogams/. -// # ==================================================================== - -// Code for the perl script that generates the ppc64 assembler -// can be found in the cryptogams repository at the link below. It is based on -// the original from openssl. - -// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91 - -// The differences in this and the original implementation are -// due to the calling conventions and initialization of constants. - -//go:build gc && !purego -// +build gc,!purego - -#include "textflag.h" - -#define OUT R3 -#define INP R4 -#define LEN R5 -#define KEY R6 -#define CNT R7 -#define TMP R15 - -#define CONSTBASE R16 -#define BLOCKS R17 - -DATA consts<>+0x00(SB)/8, $0x3320646e61707865 -DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 -DATA consts<>+0x10(SB)/8, $0x0000000000000001 -DATA consts<>+0x18(SB)/8, $0x0000000000000000 -DATA consts<>+0x20(SB)/8, $0x0000000000000004 -DATA consts<>+0x28(SB)/8, $0x0000000000000000 -DATA consts<>+0x30(SB)/8, $0x0a0b08090e0f0c0d -DATA consts<>+0x38(SB)/8, $0x0203000106070405 -DATA consts<>+0x40(SB)/8, $0x090a0b080d0e0f0c -DATA consts<>+0x48(SB)/8, $0x0102030005060704 -DATA consts<>+0x50(SB)/8, $0x6170786561707865 -DATA consts<>+0x58(SB)/8, $0x6170786561707865 -DATA consts<>+0x60(SB)/8, $0x3320646e3320646e -DATA consts<>+0x68(SB)/8, $0x3320646e3320646e -DATA consts<>+0x70(SB)/8, $0x79622d3279622d32 -DATA consts<>+0x78(SB)/8, $0x79622d3279622d32 -DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 -DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 -DATA consts<>+0x90(SB)/8, $0x0000000100000000 -DATA consts<>+0x98(SB)/8, $0x0000000300000002 -GLOBL consts<>(SB), RODATA, $0xa0 - -//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) -TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 - MOVD out+0(FP), OUT - MOVD inp+8(FP), INP - MOVD len+16(FP), LEN - MOVD key+24(FP), KEY - MOVD counter+32(FP), CNT - - // Addressing for constants - MOVD $consts<>+0x00(SB), CONSTBASE - MOVD $16, R8 - MOVD $32, R9 - MOVD $48, R10 - MOVD $64, R11 - SRD $6, LEN, BLOCKS - // V16 - LXVW4X (CONSTBASE)(R0), VS48 - ADD $80,CONSTBASE - - // Load key into V17,V18 - LXVW4X (KEY)(R0), VS49 - LXVW4X (KEY)(R8), VS50 - - // Load CNT, NONCE into V19 - LXVW4X (CNT)(R0), VS51 - - // Clear V27 - VXOR V27, V27, V27 - - // V28 - LXVW4X (CONSTBASE)(R11), VS60 - - // splat slot from V19 -> V26 - VSPLTW $0, V19, V26 - - VSLDOI $4, V19, V27, V19 - VSLDOI 
$12, V27, V19, V19 - - VADDUWM V26, V28, V26 - - MOVD $10, R14 - MOVD R14, CTR - -loop_outer_vsx: - // V0, V1, V2, V3 - LXVW4X (R0)(CONSTBASE), VS32 - LXVW4X (R8)(CONSTBASE), VS33 - LXVW4X (R9)(CONSTBASE), VS34 - LXVW4X (R10)(CONSTBASE), VS35 - - // splat values from V17, V18 into V4-V11 - VSPLTW $0, V17, V4 - VSPLTW $1, V17, V5 - VSPLTW $2, V17, V6 - VSPLTW $3, V17, V7 - VSPLTW $0, V18, V8 - VSPLTW $1, V18, V9 - VSPLTW $2, V18, V10 - VSPLTW $3, V18, V11 - - // VOR - VOR V26, V26, V12 - - // splat values from V19 -> V13, V14, V15 - VSPLTW $1, V19, V13 - VSPLTW $2, V19, V14 - VSPLTW $3, V19, V15 - - // splat const values - VSPLTISW $-16, V27 - VSPLTISW $12, V28 - VSPLTISW $8, V29 - VSPLTISW $7, V30 - -loop_vsx: - VADDUWM V0, V4, V0 - VADDUWM V1, V5, V1 - VADDUWM V2, V6, V2 - VADDUWM V3, V7, V3 - - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 - VRLW V15, V27, V15 - - VADDUWM V8, V12, V8 - VADDUWM V9, V13, V9 - VADDUWM V10, V14, V10 - VADDUWM V11, V15, V11 - - VXOR V4, V8, V4 - VXOR V5, V9, V5 - VXOR V6, V10, V6 - VXOR V7, V11, V7 - - VRLW V4, V28, V4 - VRLW V5, V28, V5 - VRLW V6, V28, V6 - VRLW V7, V28, V7 - - VADDUWM V0, V4, V0 - VADDUWM V1, V5, V1 - VADDUWM V2, V6, V2 - VADDUWM V3, V7, V3 - - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 - VRLW V15, V29, V15 - - VADDUWM V8, V12, V8 - VADDUWM V9, V13, V9 - VADDUWM V10, V14, V10 - VADDUWM V11, V15, V11 - - VXOR V4, V8, V4 - VXOR V5, V9, V5 - VXOR V6, V10, V6 - VXOR V7, V11, V7 - - VRLW V4, V30, V4 - VRLW V5, V30, V5 - VRLW V6, V30, V6 - VRLW V7, V30, V7 - - VADDUWM V0, V5, V0 - VADDUWM V1, V6, V1 - VADDUWM V2, V7, V2 - VADDUWM V3, V4, V3 - - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V27, V15 - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 - - VADDUWM V10, V15, V10 - VADDUWM V11, V12, V11 - VADDUWM V8, V13, V8 - VADDUWM V9, V14, V9 - - VXOR V5, V10, V5 - VXOR V6, V11, V6 - VXOR V7, V8, V7 - VXOR V4, V9, V4 - - VRLW V5, V28, V5 - VRLW V6, V28, V6 - VRLW V7, V28, V7 - VRLW V4, V28, V4 - - VADDUWM V0, V5, V0 - VADDUWM V1, V6, V1 - VADDUWM V2, V7, V2 - VADDUWM V3, V4, V3 - - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V29, V15 - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 - - VADDUWM V10, V15, V10 - VADDUWM V11, V12, V11 - VADDUWM V8, V13, V8 - VADDUWM V9, V14, V9 - - VXOR V5, V10, V5 - VXOR V6, V11, V6 - VXOR V7, V8, V7 - VXOR V4, V9, V4 - - VRLW V5, V30, V5 - VRLW V6, V30, V6 - VRLW V7, V30, V7 - VRLW V4, V30, V4 - BC 16, LT, loop_vsx - - VADDUWM V12, V26, V12 - - WORD $0x13600F8C // VMRGEW V0, V1, V27 - WORD $0x13821F8C // VMRGEW V2, V3, V28 - - WORD $0x10000E8C // VMRGOW V0, V1, V0 - WORD $0x10421E8C // VMRGOW V2, V3, V2 - - WORD $0x13A42F8C // VMRGEW V4, V5, V29 - WORD $0x13C63F8C // VMRGEW V6, V7, V30 - - XXPERMDI VS32, VS34, $0, VS33 - XXPERMDI VS32, VS34, $3, VS35 - XXPERMDI VS59, VS60, $0, VS32 - XXPERMDI VS59, VS60, $3, VS34 - - WORD $0x10842E8C // VMRGOW V4, V5, V4 - WORD $0x10C63E8C // VMRGOW V6, V7, V6 - - WORD $0x13684F8C // VMRGEW V8, V9, V27 - WORD $0x138A5F8C // VMRGEW V10, V11, V28 - - XXPERMDI VS36, VS38, $0, VS37 - XXPERMDI VS36, VS38, $3, VS39 - XXPERMDI VS61, VS62, $0, VS36 - XXPERMDI VS61, VS62, $3, VS38 - - WORD $0x11084E8C // VMRGOW V8, V9, V8 - WORD $0x114A5E8C // VMRGOW V10, V11, V10 - - WORD 
$0x13AC6F8C // VMRGEW V12, V13, V29 - WORD $0x13CE7F8C // VMRGEW V14, V15, V30 - - XXPERMDI VS40, VS42, $0, VS41 - XXPERMDI VS40, VS42, $3, VS43 - XXPERMDI VS59, VS60, $0, VS40 - XXPERMDI VS59, VS60, $3, VS42 - - WORD $0x118C6E8C // VMRGOW V12, V13, V12 - WORD $0x11CE7E8C // VMRGOW V14, V15, V14 - - VSPLTISW $4, V27 - VADDUWM V26, V27, V26 - - XXPERMDI VS44, VS46, $0, VS45 - XXPERMDI VS44, VS46, $3, VS47 - XXPERMDI VS61, VS62, $0, VS44 - XXPERMDI VS61, VS62, $3, VS46 - - VADDUWM V0, V16, V0 - VADDUWM V4, V17, V4 - VADDUWM V8, V18, V8 - VADDUWM V12, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - // Bottom of loop - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - - VXOR V27, V0, V27 - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(R10) - ADD $64, OUT - BEQ done_vsx - - VADDUWM V1, V16, V0 - VADDUWM V5, V17, V4 - VADDUWM V9, V18, V8 - VADDUWM V13, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - VXOR V27, V0, V27 - - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(R10) - ADD $64, OUT - BEQ done_vsx - - VADDUWM V2, V16, V0 - VADDUWM V6, V17, V4 - VADDUWM V10, V18, V8 - VADDUWM V14, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - - VXOR V27, V0, V27 - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(R10) - ADD $64, OUT - BEQ done_vsx - - VADDUWM V3, V16, V0 - VADDUWM V7, V17, V4 - VADDUWM V11, V18, V8 - VADDUWM V15, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - - VXOR V27, V0, V27 - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(R10) - ADD $64, OUT - - MOVD $10, R14 - MOVD R14, CTR - BNE loop_outer_vsx - -done_vsx: - // Increment counter by number of 64 byte blocks - MOVD (CNT), R14 - ADD BLOCKS, R14 - MOVD R14, (CNT) - RET - -tail_vsx: - ADD $32, R1, R11 - MOVD LEN, CTR - - // Save values on stack to copy from - STXVW4X VS32, (R11)(R0) - STXVW4X VS36, (R11)(R8) - STXVW4X VS40, (R11)(R9) - STXVW4X VS44, (R11)(R10) - ADD $-1, R11, R12 - ADD $-1, INP - ADD $-1, OUT - -looptail_vsx: - // Copying the result to OUT - // in bytes. - MOVBZU 1(R12), KEY - MOVBZU 1(INP), TMP - XOR KEY, TMP, KEY - MOVBU KEY, 1(OUT) - BC 16, LT, looptail_vsx - - // Clear the stack values - STXVW4X VS48, (R11)(R0) - STXVW4X VS48, (R11)(R8) - STXVW4X VS48, (R11)(R9) - STXVW4X VS48, (R11)(R10) - BR done_vsx diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go deleted file mode 100644 index 4652247b..00000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -package chacha20 - -import "golang.org/x/sys/cpu" - -var haveAsm = cpu.S390X.HasVX - -const bufSize = 256 - -// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only -// be called when the vector facility is available. Implementation in asm_s390x.s. -// -//go:noescape -func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) - -func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { - if cpu.S390X.HasVX { - xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) - } else { - c.xorKeyStreamBlocksGeneric(dst, src) - } -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s deleted file mode 100644 index f3ef5a01..00000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -#include "go_asm.h" -#include "textflag.h" - -// This is an implementation of the ChaCha20 encryption algorithm as -// specified in RFC 7539. It uses vector instructions to compute -// 4 keystream blocks in parallel (256 bytes) which are then XORed -// with the bytes in the input slice. - -GLOBL ·constants<>(SB), RODATA|NOPTR, $32 -// BSWAP: swap bytes in each 4-byte element -DATA ·constants<>+0x00(SB)/4, $0x03020100 -DATA ·constants<>+0x04(SB)/4, $0x07060504 -DATA ·constants<>+0x08(SB)/4, $0x0b0a0908 -DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c -// J0: [j0, j1, j2, j3] -DATA ·constants<>+0x10(SB)/4, $0x61707865 -DATA ·constants<>+0x14(SB)/4, $0x3320646e -DATA ·constants<>+0x18(SB)/4, $0x79622d32 -DATA ·constants<>+0x1c(SB)/4, $0x6b206574 - -#define BSWAP V5 -#define J0 V6 -#define KEY0 V7 -#define KEY1 V8 -#define NONCE V9 -#define CTR V10 -#define M0 V11 -#define M1 V12 -#define M2 V13 -#define M3 V14 -#define INC V15 -#define X0 V16 -#define X1 V17 -#define X2 V18 -#define X3 V19 -#define X4 V20 -#define X5 V21 -#define X6 V22 -#define X7 V23 -#define X8 V24 -#define X9 V25 -#define X10 V26 -#define X11 V27 -#define X12 V28 -#define X13 V29 -#define X14 V30 -#define X15 V31 - -#define NUM_ROUNDS 20 - -#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \ - VAF a1, a0, a0 \ - VAF b1, b0, b0 \ - VAF c1, c0, c0 \ - VAF d1, d0, d0 \ - VX a0, a2, a2 \ - VX b0, b2, b2 \ - VX c0, c2, c2 \ - VX d0, d2, d2 \ - VERLLF $16, a2, a2 \ - VERLLF $16, b2, b2 \ - VERLLF $16, c2, c2 \ - VERLLF $16, d2, d2 \ - VAF a2, a3, a3 \ - VAF b2, b3, b3 \ - VAF c2, c3, c3 \ - VAF d2, d3, d3 \ - VX a3, a1, a1 \ - VX b3, b1, b1 \ - VX c3, c1, c1 \ - VX d3, d1, d1 \ - VERLLF $12, a1, a1 \ - VERLLF $12, b1, b1 \ - VERLLF $12, c1, c1 \ - VERLLF $12, d1, d1 \ - VAF a1, a0, a0 \ - VAF b1, b0, b0 \ - VAF c1, c0, c0 \ - VAF d1, d0, d0 \ - VX a0, a2, a2 \ - VX b0, b2, b2 \ - VX c0, c2, c2 \ - VX d0, d2, d2 \ - VERLLF $8, a2, a2 \ - VERLLF $8, b2, b2 \ - VERLLF $8, c2, c2 \ - VERLLF $8, d2, d2 \ - VAF a2, a3, a3 \ - VAF b2, b3, b3 \ - VAF c2, c3, c3 \ - VAF d2, d3, d3 \ - VX a3, a1, a1 \ - VX b3, b1, b1 \ - VX c3, c1, c1 \ - VX d3, d1, d1 \ - VERLLF $7, a1, a1 \ - VERLLF $7, b1, b1 \ - VERLLF $7, c1, c1 \ - VERLLF $7, d1, d1 - -#define PERMUTE(mask, v0, v1, v2, v3) \ - VPERM v0, v0, mask, v0 \ - VPERM v1, v1, mask, v1 \ 
- VPERM v2, v2, mask, v2 \ - VPERM v3, v3, mask, v3 - -#define ADDV(x, v0, v1, v2, v3) \ - VAF x, v0, v0 \ - VAF x, v1, v1 \ - VAF x, v2, v2 \ - VAF x, v3, v3 - -#define XORV(off, dst, src, v0, v1, v2, v3) \ - VLM off(src), M0, M3 \ - PERMUTE(BSWAP, v0, v1, v2, v3) \ - VX v0, M0, M0 \ - VX v1, M1, M1 \ - VX v2, M2, M2 \ - VX v3, M3, M3 \ - VSTM M0, M3, off(dst) - -#define SHUFFLE(a, b, c, d, t, u, v, w) \ - VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]} - VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]} - VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]} - VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]} - VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]} - VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]} - VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]} - VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]} - -// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) -TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 - MOVD $·constants<>(SB), R1 - MOVD dst+0(FP), R2 // R2=&dst[0] - LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src) - MOVD key+48(FP), R5 // R5=key - MOVD nonce+56(FP), R6 // R6=nonce - MOVD counter+64(FP), R7 // R7=counter - - // load BSWAP and J0 - VLM (R1), BSWAP, J0 - - // setup - MOVD $95, R0 - VLM (R5), KEY0, KEY1 - VLL R0, (R6), NONCE - VZERO M0 - VLEIB $7, $32, M0 - VSRLB M0, NONCE, NONCE - - // initialize counter values - VLREPF (R7), CTR - VZERO INC - VLEIF $1, $1, INC - VLEIF $2, $2, INC - VLEIF $3, $3, INC - VAF INC, CTR, CTR - VREPIF $4, INC - -chacha: - VREPF $0, J0, X0 - VREPF $1, J0, X1 - VREPF $2, J0, X2 - VREPF $3, J0, X3 - VREPF $0, KEY0, X4 - VREPF $1, KEY0, X5 - VREPF $2, KEY0, X6 - VREPF $3, KEY0, X7 - VREPF $0, KEY1, X8 - VREPF $1, KEY1, X9 - VREPF $2, KEY1, X10 - VREPF $3, KEY1, X11 - VLR CTR, X12 - VREPF $1, NONCE, X13 - VREPF $2, NONCE, X14 - VREPF $3, NONCE, X15 - - MOVD $(NUM_ROUNDS/2), R1 - -loop: - ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11) - ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9) - - ADD $-1, R1 - BNE loop - - // decrement length - ADD $-256, R4 - - // rearrange vectors - SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3) - ADDV(J0, X0, X1, X2, X3) - SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3) - ADDV(KEY0, X4, X5, X6, X7) - SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3) - ADDV(KEY1, X8, X9, X10, X11) - VAF CTR, X12, X12 - SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3) - ADDV(NONCE, X12, X13, X14, X15) - - // increment counters - VAF INC, CTR, CTR - - // xor keystream with plaintext - XORV(0*64, R2, R3, X0, X4, X8, X12) - XORV(1*64, R2, R3, X1, X5, X9, X13) - XORV(2*64, R2, R3, X2, X6, X10, X14) - XORV(3*64, R2, R3, X3, X7, X11, X15) - - // increment pointers - MOVD $256(R2), R2 - MOVD $256(R3), R3 - - CMPBNE R4, $0, chacha - - VSTEF $0, CTR, (R7) - RET diff --git a/vendor/golang.org/x/crypto/chacha20/xor.go b/vendor/golang.org/x/crypto/chacha20/xor.go deleted file mode 100644 index c2d04851..00000000 --- a/vendor/golang.org/x/crypto/chacha20/xor.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package chacha20 - -import "runtime" - -// Platforms that have fast unaligned 32-bit little endian accesses.
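Both deleted vector implementations (the ppc64le loop_vsx and the s390x ROUND4 macro above) evaluate the standard ChaCha20 quarter-round, with rotation amounts 16, 12, 8 and 7, across whole vectors of state words at once. As a hedged reference point, here is a minimal scalar sketch of that quarter-round in Go; the function name is illustrative and is not part of the vendored code:

package main

import (
	"fmt"
	"math/bits"
)

// quarterRound is the scalar form of the add/xor/rotate sequence that
// the vector loops above apply to four keystream blocks in parallel.
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d = bits.RotateLeft32(d^a, 16)
	c += d
	b = bits.RotateLeft32(b^c, 12)
	a += b
	d = bits.RotateLeft32(d^a, 8)
	c += d
	b = bits.RotateLeft32(b^c, 7)
	return a, b, c, d
}

func main() {
	// 0x61707865 is the "expa" constant word from the tables above;
	// the other inputs are arbitrary, just to show the call shape.
	fmt.Println(quarterRound(0x61707865, 1, 2, 3))
}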
-const unaligned = runtime.GOARCH == "386" || - runtime.GOARCH == "amd64" || - runtime.GOARCH == "arm64" || - runtime.GOARCH == "ppc64le" || - runtime.GOARCH == "s390x" - -// addXor reads a little endian uint32 from src, XORs it with (a + b) and -// places the result in little endian byte order in dst. -func addXor(dst, src []byte, a, b uint32) { - _, _ = src[3], dst[3] // bounds check elimination hint - if unaligned { - // The compiler should optimize this code into - // 32-bit unaligned little endian loads and stores. - // TODO: delete once the compiler does a reliably - // good job with the generic code below. - // See issue #25111 for more details. - v := uint32(src[0]) - v |= uint32(src[1]) << 8 - v |= uint32(src[2]) << 16 - v |= uint32(src[3]) << 24 - v ^= a + b - dst[0] = byte(v) - dst[1] = byte(v >> 8) - dst[2] = byte(v >> 16) - dst[3] = byte(v >> 24) - } else { - a += b - dst[0] = src[0] ^ byte(a) - dst[1] = src[1] ^ byte(a>>8) - dst[2] = src[2] ^ byte(a>>16) - dst[3] = src[3] ^ byte(a>>24) - } -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go deleted file mode 100644 index bc62161d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package curve25519 provides an implementation of the X25519 function, which -// performs scalar multiplication on the elliptic curve known as Curve25519. -// See RFC 7748. -package curve25519 // import "golang.org/x/crypto/curve25519" - -import ( - "crypto/subtle" - "errors" - "strconv" - - "golang.org/x/crypto/curve25519/internal/field" -) - -// ScalarMult sets dst to the product scalar * point. -// -// Deprecated: when provided a low-order point, ScalarMult will set dst to all -// zeroes, irrespective of the scalar. Instead, use the X25519 function, which -// will return an error. -func ScalarMult(dst, scalar, point *[32]byte) { - var e [32]byte - - copy(e[:], scalar[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element - x1.SetBytes(point[:]) - x2.One() - x3.Set(&x1) - z3.One() - - swap := 0 - for pos := 254; pos >= 0; pos-- { - b := e[pos/8] >> uint(pos&7) - b &= 1 - swap ^= int(b) - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - swap = int(b) - - tmp0.Subtract(&x3, &z3) - tmp1.Subtract(&x2, &z2) - x2.Add(&x2, &z2) - z2.Add(&x3, &z3) - z3.Multiply(&tmp0, &x2) - z2.Multiply(&z2, &tmp1) - tmp0.Square(&tmp1) - tmp1.Square(&x2) - x3.Add(&z3, &z2) - z2.Subtract(&z3, &z2) - x2.Multiply(&tmp1, &tmp0) - tmp1.Subtract(&tmp1, &tmp0) - z2.Square(&z2) - - z3.Mult32(&tmp1, 121666) - x3.Square(&x3) - tmp0.Add(&tmp0, &z3) - z3.Multiply(&x1, &z2) - z2.Multiply(&tmp1, &tmp0) - } - - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - - z2.Invert(&z2) - x2.Multiply(&x2, &z2) - copy(dst[:], x2.Bytes()) -} - -// ScalarBaseMult sets dst to the product scalar * base where base is the -// standard generator. -// -// It is recommended to use the X25519 function with Basepoint instead, as -// copying into fixed size arrays can lead to unexpected bugs. -func ScalarBaseMult(dst, scalar *[32]byte) { - ScalarMult(dst, scalar, &basePoint) -} - -const ( - // ScalarSize is the size of the scalar input to X25519. - ScalarSize = 32 - // PointSize is the size of the point input to X25519. - PointSize = 32 -) - -// Basepoint is the canonical Curve25519 generator. 
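The two branches of addXor above are equivalent; on the listed architectures the compiler turns the byte-by-byte loads into single unaligned 32-bit accesses. A hedged sketch of the same operation written directly with encoding/binary (illustrative only, not the package's code):

package main

import (
	"encoding/binary"
	"fmt"
)

// addXor rewritten with encoding/binary: read a little-endian uint32
// from src, XOR it with a+b, and store the result little-endian to dst.
func addXor(dst, src []byte, a, b uint32) {
	v := binary.LittleEndian.Uint32(src)
	binary.LittleEndian.PutUint32(dst, v^(a+b))
}

func main() {
	src := []byte{0x01, 0x02, 0x03, 0x04}
	dst := make([]byte, 4)
	addXor(dst, src, 0x11111111, 0x22222222)
	fmt.Printf("%x\n", dst) // one keystream word XORed into the input
}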
-var Basepoint []byte - -var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} - -func init() { Basepoint = basePoint[:] } - -func checkBasepoint() { - if subtle.ConstantTimeCompare(Basepoint, []byte{ - 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }) != 1 { - panic("curve25519: global Basepoint value was modified") - } -} - -// X25519 returns the result of the scalar multiplication (scalar * point), -// according to RFC 7748, Section 5. scalar, point and the return value are -// slices of 32 bytes. -// -// scalar can be generated at random, for example with crypto/rand. point should -// be either Basepoint or the output of another X25519 call. -// -// If point is Basepoint (but not if it's a different slice with the same -// contents) a precomputed implementation might be used for performance. -func X25519(scalar, point []byte) ([]byte, error) { - // Outline the body of function, to let the allocation be inlined in the - // caller, and possibly avoid escaping to the heap. - var dst [32]byte - return x25519(&dst, scalar, point) -} - -func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { - var in [32]byte - if l := len(scalar); l != 32 { - return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32") - } - if l := len(point); l != 32 { - return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32") - } - copy(in[:], scalar) - if &point[0] == &Basepoint[0] { - checkBasepoint() - ScalarBaseMult(dst, &in) - } else { - var base, zero [32]byte - copy(base[:], point) - ScalarMult(dst, &in, &base) - if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { - return nil, errors.New("bad input point: low order point") - } - } - return dst[:], nil -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/README b/vendor/golang.org/x/crypto/curve25519/internal/field/README deleted file mode 100644 index e25bca7d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/README +++ /dev/null @@ -1,7 +0,0 @@ -This package is kept in sync with crypto/ed25519/internal/edwards25519/field in -the standard library. - -If there are any changes in the standard library that need to be synced to this -package, run sync.sh. It will not overwrite any local changes made since the -previous sync, so it's ok to land changes in this package first, and then sync -to the standard library later. diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go deleted file mode 100644 index ca841ad9..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package field implements fast arithmetic modulo 2^255-19. -package field - -import ( - "crypto/subtle" - "encoding/binary" - "math/bits" -) - -// Element represents an element of the field GF(2^255-19). Note that this -// is not a cryptographically secure group, and should only be used to interact -// with edwards25519.Point coordinates. -// -// This type works similarly to math/big.Int, and all arguments and receivers -// are allowed to alias. -// -// The zero value is a valid zero element. 
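The X25519 function above is the package's whole public surface for key agreement. A minimal sketch of a Diffie-Hellman exchange against that API, with error handling elided for brevity (assumes the package is importable as golang.org/x/crypto/curve25519):

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	// Each side draws a random 32-byte scalar and publishes scalar*basepoint.
	aPriv := make([]byte, curve25519.ScalarSize)
	bPriv := make([]byte, curve25519.ScalarSize)
	rand.Read(aPriv)
	rand.Read(bPriv)

	aPub, _ := curve25519.X25519(aPriv, curve25519.Basepoint)
	bPub, _ := curve25519.X25519(bPriv, curve25519.Basepoint)

	// Both sides derive the same shared secret from the peer's public key.
	s1, _ := curve25519.X25519(aPriv, bPub)
	s2, _ := curve25519.X25519(bPriv, aPub)
	fmt.Println(bytes.Equal(s1, s2)) // true
}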
-type Element struct { - // An element t represents the integer - // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204 - // - // Between operations, all limbs are expected to be lower than 2^52. - l0 uint64 - l1 uint64 - l2 uint64 - l3 uint64 - l4 uint64 -} - -const maskLow51Bits uint64 = (1 << 51) - 1 - -var feZero = &Element{0, 0, 0, 0, 0} - -// Zero sets v = 0, and returns v. -func (v *Element) Zero() *Element { - *v = *feZero - return v -} - -var feOne = &Element{1, 0, 0, 0, 0} - -// One sets v = 1, and returns v. -func (v *Element) One() *Element { - *v = *feOne - return v -} - -// reduce reduces v modulo 2^255 - 19 and returns it. -func (v *Element) reduce() *Element { - v.carryPropagate() - - // After the light reduction we now have a field element representation - // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19. - - // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1, - // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise. - c := (v.l0 + 19) >> 51 - c = (v.l1 + c) >> 51 - c = (v.l2 + c) >> 51 - c = (v.l3 + c) >> 51 - c = (v.l4 + c) >> 51 - - // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's - // effectively applying the reduction identity to the carry. - v.l0 += 19 * c - - v.l1 += v.l0 >> 51 - v.l0 = v.l0 & maskLow51Bits - v.l2 += v.l1 >> 51 - v.l1 = v.l1 & maskLow51Bits - v.l3 += v.l2 >> 51 - v.l2 = v.l2 & maskLow51Bits - v.l4 += v.l3 >> 51 - v.l3 = v.l3 & maskLow51Bits - // no additional carry - v.l4 = v.l4 & maskLow51Bits - - return v -} - -// Add sets v = a + b, and returns v. -func (v *Element) Add(a, b *Element) *Element { - v.l0 = a.l0 + b.l0 - v.l1 = a.l1 + b.l1 - v.l2 = a.l2 + b.l2 - v.l3 = a.l3 + b.l3 - v.l4 = a.l4 + b.l4 - // Using the generic implementation here is actually faster than the - // assembly. Probably because the body of this function is so simple that - // the compiler can figure out better optimizations by inlining the carry - // propagation. TODO - return v.carryPropagateGeneric() -} - -// Subtract sets v = a - b, and returns v. -func (v *Element) Subtract(a, b *Element) *Element { - // We first add 2 * p, to guarantee the subtraction won't underflow, and - // then subtract b (which can be up to 2^255 + 2^13 * 19). - v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0 - v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1 - v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2 - v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3 - v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4 - return v.carryPropagate() -} - -// Negate sets v = -a, and returns v. -func (v *Element) Negate(a *Element) *Element { - return v.Subtract(feZero, a) -} - -// Invert sets v = 1/z mod p, and returns v. -// -// If z == 0, Invert returns v = 0. -func (v *Element) Invert(z *Element) *Element { - // Inversion is implemented as exponentiation with exponent p − 2. It uses the - // same sequence of 255 squarings and 11 multiplications as [Curve25519]. 
- var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element - - z2.Square(z) // 2 - t.Square(&z2) // 4 - t.Square(&t) // 8 - z9.Multiply(&t, z) // 9 - z11.Multiply(&z9, &z2) // 11 - t.Square(&z11) // 22 - z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0 - - t.Square(&z2_5_0) // 2^6 - 2^1 - for i := 0; i < 4; i++ { - t.Square(&t) // 2^10 - 2^5 - } - z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0 - - t.Square(&z2_10_0) // 2^11 - 2^1 - for i := 0; i < 9; i++ { - t.Square(&t) // 2^20 - 2^10 - } - z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0 - - t.Square(&z2_20_0) // 2^21 - 2^1 - for i := 0; i < 19; i++ { - t.Square(&t) // 2^40 - 2^20 - } - t.Multiply(&t, &z2_20_0) // 2^40 - 2^0 - - t.Square(&t) // 2^41 - 2^1 - for i := 0; i < 9; i++ { - t.Square(&t) // 2^50 - 2^10 - } - z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0 - - t.Square(&z2_50_0) // 2^51 - 2^1 - for i := 0; i < 49; i++ { - t.Square(&t) // 2^100 - 2^50 - } - z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0 - - t.Square(&z2_100_0) // 2^101 - 2^1 - for i := 0; i < 99; i++ { - t.Square(&t) // 2^200 - 2^100 - } - t.Multiply(&t, &z2_100_0) // 2^200 - 2^0 - - t.Square(&t) // 2^201 - 2^1 - for i := 0; i < 49; i++ { - t.Square(&t) // 2^250 - 2^50 - } - t.Multiply(&t, &z2_50_0) // 2^250 - 2^0 - - t.Square(&t) // 2^251 - 2^1 - t.Square(&t) // 2^252 - 2^2 - t.Square(&t) // 2^253 - 2^3 - t.Square(&t) // 2^254 - 2^4 - t.Square(&t) // 2^255 - 2^5 - - return v.Multiply(&t, &z11) // 2^255 - 21 -} - -// Set sets v = a, and returns v. -func (v *Element) Set(a *Element) *Element { - *v = *a - return v -} - -// SetBytes sets v to x, which must be a 32-byte little-endian encoding. -// -// Consistent with RFC 7748, the most significant bit (the high bit of the -// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1) -// are accepted. Note that this is laxer than specified by RFC 8032. -func (v *Element) SetBytes(x []byte) *Element { - if len(x) != 32 { - panic("edwards25519: invalid field element input size") - } - - // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51). - v.l0 = binary.LittleEndian.Uint64(x[0:8]) - v.l0 &= maskLow51Bits - // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51). - v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3 - v.l1 &= maskLow51Bits - // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51). - v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6 - v.l2 &= maskLow51Bits - // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51). - v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1 - v.l3 &= maskLow51Bits - // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51). - // Note: not bytes 25:33, shift 4, to avoid overread. - v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12 - v.l4 &= maskLow51Bits - - return v -} - -// Bytes returns the canonical 32-byte little-endian encoding of v. -func (v *Element) Bytes() []byte { - // This function is outlined to make the allocations inline in the caller - // rather than happen on the heap. - var out [32]byte - return v.bytes(&out) -} - -func (v *Element) bytes(out *[32]byte) []byte { - t := *v - t.reduce() - - var buf [8]byte - for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} { - bitsOffset := i * 51 - binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8)) - for i, bb := range buf { - off := bitsOffset/8 + i - if off >= len(out) { - break - } - out[off] |= bb - } - } - - return out[:] -} - -// Equal returns 1 if v and u are equal, and 0 otherwise.
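SetBytes above packs a 255-bit little-endian integer into five 51-bit limbs read at byte offsets 0, 6, 12, 19 and 24. A standalone sketch checking that split against math/big (illustrative only; the mask constant is copied from the deleted file):

package main

import (
	"encoding/binary"
	"fmt"
	"math/big"
)

const maskLow51Bits = (1 << 51) - 1

func main() {
	// An arbitrary 32-byte little-endian encoding with the top bit clear.
	var x [32]byte
	for i := range x {
		x[i] = byte(i + 1)
	}
	x[31] &= 0x7f

	// The same five-limb split performed by SetBytes above.
	l0 := binary.LittleEndian.Uint64(x[0:8]) & maskLow51Bits
	l1 := binary.LittleEndian.Uint64(x[6:14]) >> 3 & maskLow51Bits
	l2 := binary.LittleEndian.Uint64(x[12:20]) >> 6 & maskLow51Bits
	l3 := binary.LittleEndian.Uint64(x[19:27]) >> 1 & maskLow51Bits
	l4 := binary.LittleEndian.Uint64(x[24:32]) >> 12 & maskLow51Bits

	// Reassemble l0 + l1*2^51 + l2*2^102 + l3*2^153 + l4*2^204.
	got := new(big.Int)
	for _, l := range []uint64{l4, l3, l2, l1, l0} {
		got.Lsh(got, 51)
		got.Or(got, new(big.Int).SetUint64(l))
	}

	// math/big wants big-endian bytes, so reverse the encoding.
	be := make([]byte, 32)
	for i := range be {
		be[i] = x[31-i]
	}
	fmt.Println(got.Cmp(new(big.Int).SetBytes(be)) == 0) // true
}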
-func (v *Element) Equal(u *Element) int { - sa, sv := u.Bytes(), v.Bytes() - return subtle.ConstantTimeCompare(sa, sv) -} - -// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise. -func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) } - -// Select sets v to a if cond == 1, and to b if cond == 0. -func (v *Element) Select(a, b *Element, cond int) *Element { - m := mask64Bits(cond) - v.l0 = (m & a.l0) | (^m & b.l0) - v.l1 = (m & a.l1) | (^m & b.l1) - v.l2 = (m & a.l2) | (^m & b.l2) - v.l3 = (m & a.l3) | (^m & b.l3) - v.l4 = (m & a.l4) | (^m & b.l4) - return v -} - -// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v. -func (v *Element) Swap(u *Element, cond int) { - m := mask64Bits(cond) - t := m & (v.l0 ^ u.l0) - v.l0 ^= t - u.l0 ^= t - t = m & (v.l1 ^ u.l1) - v.l1 ^= t - u.l1 ^= t - t = m & (v.l2 ^ u.l2) - v.l2 ^= t - u.l2 ^= t - t = m & (v.l3 ^ u.l3) - v.l3 ^= t - u.l3 ^= t - t = m & (v.l4 ^ u.l4) - v.l4 ^= t - u.l4 ^= t -} - -// IsNegative returns 1 if v is negative, and 0 otherwise. -func (v *Element) IsNegative() int { - return int(v.Bytes()[0] & 1) -} - -// Absolute sets v to |u|, and returns v. -func (v *Element) Absolute(u *Element) *Element { - return v.Select(new(Element).Negate(u), u, u.IsNegative()) -} - -// Multiply sets v = x * y, and returns v. -func (v *Element) Multiply(x, y *Element) *Element { - feMul(v, x, y) - return v -} - -// Square sets v = x * x, and returns v. -func (v *Element) Square(x *Element) *Element { - feSquare(v, x) - return v -} - -// Mult32 sets v = x * y, and returns v. -func (v *Element) Mult32(x *Element, y uint32) *Element { - x0lo, x0hi := mul51(x.l0, y) - x1lo, x1hi := mul51(x.l1, y) - x2lo, x2hi := mul51(x.l2, y) - x3lo, x3hi := mul51(x.l3, y) - x4lo, x4hi := mul51(x.l4, y) - v.l0 = x0lo + 19*x4hi // carried over per the reduction identity - v.l1 = x1lo + x0hi - v.l2 = x2lo + x1hi - v.l3 = x3lo + x2hi - v.l4 = x4lo + x3hi - // The hi portions are going to be only 32 bits, plus any previous excess, - // so we can skip the carry propagation. - return v -} - -// mul51 returns lo + hi * 2⁵¹ = a * b. -func mul51(a uint64, b uint32) (lo uint64, hi uint64) { - mh, ml := bits.Mul64(a, uint64(b)) - lo = ml & maskLow51Bits - hi = (mh << 13) | (ml >> 51) - return -} - -// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3. 
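Swap above never branches on cond; it expands cond into an all-ones or all-zeros mask via mask64Bits and XORs the masked difference into both operands. The same trick on a single pair of words, as a hedged standalone sketch:

package main

import "fmt"

// ctSwap swaps a and b when cond == 1 and leaves them unchanged when
// cond == 0, without branching on cond; this mirrors Element.Swap above.
func ctSwap(cond int, a, b *uint64) {
	m := ^(uint64(cond) - 1) // all ones if cond == 1, zero if cond == 0
	t := m & (*a ^ *b)
	*a ^= t
	*b ^= t
}

func main() {
	x, y := uint64(1), uint64(2)
	ctSwap(0, &x, &y)
	fmt.Println(x, y) // 1 2
	ctSwap(1, &x, &y)
	fmt.Println(x, y) // 2 1
}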
-func (v *Element) Pow22523(x *Element) *Element { - var t0, t1, t2 Element - - t0.Square(x) // x^2 - t1.Square(&t0) // x^4 - t1.Square(&t1) // x^8 - t1.Multiply(x, &t1) // x^9 - t0.Multiply(&t0, &t1) // x^11 - t0.Square(&t0) // x^22 - t0.Multiply(&t1, &t0) // x^31 - t1.Square(&t0) // x^62 - for i := 1; i < 5; i++ { // x^992 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1 - t1.Square(&t0) // 2^11 - 2 - for i := 1; i < 10; i++ { // 2^20 - 2^10 - t1.Square(&t1) - } - t1.Multiply(&t1, &t0) // 2^20 - 1 - t2.Square(&t1) // 2^21 - 2 - for i := 1; i < 20; i++ { // 2^40 - 2^20 - t2.Square(&t2) - } - t1.Multiply(&t2, &t1) // 2^40 - 1 - t1.Square(&t1) // 2^41 - 2 - for i := 1; i < 10; i++ { // 2^50 - 2^10 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // 2^50 - 1 - t1.Square(&t0) // 2^51 - 2 - for i := 1; i < 50; i++ { // 2^100 - 2^50 - t1.Square(&t1) - } - t1.Multiply(&t1, &t0) // 2^100 - 1 - t2.Square(&t1) // 2^101 - 2 - for i := 1; i < 100; i++ { // 2^200 - 2^100 - t2.Square(&t2) - } - t1.Multiply(&t2, &t1) // 2^200 - 1 - t1.Square(&t1) // 2^201 - 2 - for i := 1; i < 50; i++ { // 2^250 - 2^50 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // 2^250 - 1 - t0.Square(&t0) // 2^251 - 2 - t0.Square(&t0) // 2^252 - 4 - return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3) -} - -// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion. -var sqrtM1 = &Element{1718705420411056, 234908883556509, - 2233514472574048, 2117202627021982, 765476049583133} - -// SqrtRatio sets r to the non-negative square root of the ratio of u and v. -// -// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio -// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00, -// and returns r and 0. -func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) { - var a, b Element - - // r = (u * v3) * (u * v7)^((p-5)/8) - v2 := a.Square(v) - uv3 := b.Multiply(u, b.Multiply(v2, v)) - uv7 := a.Multiply(uv3, a.Square(v2)) - r.Multiply(uv3, r.Pow22523(uv7)) - - check := a.Multiply(v, a.Square(r)) // check = v * r^2 - - uNeg := b.Negate(u) - correctSignSqrt := check.Equal(u) - flippedSignSqrt := check.Equal(uNeg) - flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1)) - - rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r - // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r) - r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI) - - r.Absolute(r) // Choose the nonnegative square root. - return r, correctSignSqrt | flippedSignSqrt -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go deleted file mode 100644 index edcf163c..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. - -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -package field - -// feMul sets out = a * b. It works like feMulGeneric. -// -//go:noescape -func feMul(out *Element, a *Element, b *Element) - -// feSquare sets out = a * a. It works like feSquareGeneric. 
-// -//go:noescape -func feSquare(out *Element, a *Element) diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s deleted file mode 100644 index 293f013c..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s +++ /dev/null @@ -1,379 +0,0 @@ -// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. - -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -#include "textflag.h" - -// func feMul(out *Element, a *Element, b *Element) -TEXT ·feMul(SB), NOSPLIT, $0-24 - MOVQ a+8(FP), CX - MOVQ b+16(FP), BX - - // r0 = a0×b0 - MOVQ (CX), AX - MULQ (BX) - MOVQ AX, DI - MOVQ DX, SI - - // r0 += 19×a1×b4 - MOVQ 8(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a2×b3 - MOVQ 16(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a3×b2 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 16(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a4×b1 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 8(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r1 = a0×b1 - MOVQ (CX), AX - MULQ 8(BX) - MOVQ AX, R9 - MOVQ DX, R8 - - // r1 += a1×b0 - MOVQ 8(CX), AX - MULQ (BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a2×b4 - MOVQ 16(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a3×b3 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a4×b2 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 16(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r2 = a0×b2 - MOVQ (CX), AX - MULQ 16(BX) - MOVQ AX, R11 - MOVQ DX, R10 - - // r2 += a1×b1 - MOVQ 8(CX), AX - MULQ 8(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += a2×b0 - MOVQ 16(CX), AX - MULQ (BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += 19×a3×b4 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += 19×a4×b3 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r3 = a0×b3 - MOVQ (CX), AX - MULQ 24(BX) - MOVQ AX, R13 - MOVQ DX, R12 - - // r3 += a1×b2 - MOVQ 8(CX), AX - MULQ 16(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += a2×b1 - MOVQ 16(CX), AX - MULQ 8(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += a3×b0 - MOVQ 24(CX), AX - MULQ (BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += 19×a4×b4 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r4 = a0×b4 - MOVQ (CX), AX - MULQ 32(BX) - MOVQ AX, R15 - MOVQ DX, R14 - - // r4 += a1×b3 - MOVQ 8(CX), AX - MULQ 24(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a2×b2 - MOVQ 16(CX), AX - MULQ 16(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a3×b1 - MOVQ 24(CX), AX - MULQ 8(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a4×b0 - MOVQ 32(CX), AX - MULQ (BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // First reduction chain - MOVQ $0x0007ffffffffffff, AX - SHLQ $0x0d, DI, SI - SHLQ $0x0d, R9, R8 - SHLQ $0x0d, R11, R10 - SHLQ $0x0d, R13, R12 - SHLQ $0x0d, R15, R14 - ANDQ AX, DI - IMUL3Q $0x13, R14, R14 - ADDQ R14, DI - ANDQ AX, R9 - ADDQ SI, R9 - ANDQ AX, R11 - ADDQ R8, R11 - ANDQ AX, R13 - ADDQ R10, R13 - ANDQ AX, R15 - ADDQ R12, R15 - - // Second reduction chain (carryPropagate) - MOVQ DI, SI - SHRQ $0x33, SI - MOVQ R9, R8 - SHRQ $0x33, R8 - MOVQ R11, R10 - SHRQ $0x33, R10 - MOVQ R13, R12 - SHRQ $0x33, R12 - MOVQ R15, R14 - SHRQ $0x33, R14 - ANDQ AX, DI - IMUL3Q $0x13, R14, R14 - ADDQ R14, DI - ANDQ AX, R9 - ADDQ 
SI, R9 - ANDQ AX, R11 - ADDQ R8, R11 - ANDQ AX, R13 - ADDQ R10, R13 - ANDQ AX, R15 - ADDQ R12, R15 - - // Store output - MOVQ out+0(FP), AX - MOVQ DI, (AX) - MOVQ R9, 8(AX) - MOVQ R11, 16(AX) - MOVQ R13, 24(AX) - MOVQ R15, 32(AX) - RET - -// func feSquare(out *Element, a *Element) -TEXT ·feSquare(SB), NOSPLIT, $0-16 - MOVQ a+8(FP), CX - - // r0 = l0×l0 - MOVQ (CX), AX - MULQ (CX) - MOVQ AX, SI - MOVQ DX, BX - - // r0 += 38×l1×l4 - MOVQ 8(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, SI - ADCQ DX, BX - - // r0 += 38×l2×l3 - MOVQ 16(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 24(CX) - ADDQ AX, SI - ADCQ DX, BX - - // r1 = 2×l0×l1 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 8(CX) - MOVQ AX, R8 - MOVQ DX, DI - - // r1 += 38×l2×l4 - MOVQ 16(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, R8 - ADCQ DX, DI - - // r1 += 19×l3×l3 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(CX) - ADDQ AX, R8 - ADCQ DX, DI - - // r2 = 2×l0×l2 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 16(CX) - MOVQ AX, R10 - MOVQ DX, R9 - - // r2 += l1×l1 - MOVQ 8(CX), AX - MULQ 8(CX) - ADDQ AX, R10 - ADCQ DX, R9 - - // r2 += 38×l3×l4 - MOVQ 24(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, R10 - ADCQ DX, R9 - - // r3 = 2×l0×l3 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 24(CX) - MOVQ AX, R12 - MOVQ DX, R11 - - // r3 += 2×l1×l2 - MOVQ 8(CX), AX - IMUL3Q $0x02, AX, AX - MULQ 16(CX) - ADDQ AX, R12 - ADCQ DX, R11 - - // r3 += 19×l4×l4 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(CX) - ADDQ AX, R12 - ADCQ DX, R11 - - // r4 = 2×l0×l4 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 32(CX) - MOVQ AX, R14 - MOVQ DX, R13 - - // r4 += 2×l1×l3 - MOVQ 8(CX), AX - IMUL3Q $0x02, AX, AX - MULQ 24(CX) - ADDQ AX, R14 - ADCQ DX, R13 - - // r4 += l2×l2 - MOVQ 16(CX), AX - MULQ 16(CX) - ADDQ AX, R14 - ADCQ DX, R13 - - // First reduction chain - MOVQ $0x0007ffffffffffff, AX - SHLQ $0x0d, SI, BX - SHLQ $0x0d, R8, DI - SHLQ $0x0d, R10, R9 - SHLQ $0x0d, R12, R11 - SHLQ $0x0d, R14, R13 - ANDQ AX, SI - IMUL3Q $0x13, R13, R13 - ADDQ R13, SI - ANDQ AX, R8 - ADDQ BX, R8 - ANDQ AX, R10 - ADDQ DI, R10 - ANDQ AX, R12 - ADDQ R9, R12 - ANDQ AX, R14 - ADDQ R11, R14 - - // Second reduction chain (carryPropagate) - MOVQ SI, BX - SHRQ $0x33, BX - MOVQ R8, DI - SHRQ $0x33, DI - MOVQ R10, R9 - SHRQ $0x33, R9 - MOVQ R12, R11 - SHRQ $0x33, R11 - MOVQ R14, R13 - SHRQ $0x33, R13 - ANDQ AX, SI - IMUL3Q $0x13, R13, R13 - ADDQ R13, SI - ANDQ AX, R8 - ADDQ BX, R8 - ANDQ AX, R10 - ADDQ DI, R10 - ANDQ AX, R12 - ADDQ R9, R12 - ANDQ AX, R14 - ADDQ R11, R14 - - // Store output - MOVQ out+0(FP), AX - MOVQ SI, (AX) - MOVQ R8, 8(AX) - MOVQ R10, 16(AX) - MOVQ R12, 24(AX) - MOVQ R14, 32(AX) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go deleted file mode 100644 index ddb6c9b8..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !amd64 || !gc || purego -// +build !amd64 !gc purego - -package field - -func feMul(v, x, y *Element) { feMulGeneric(v, x, y) } - -func feSquare(v, x *Element) { feSquareGeneric(v, x) } diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go deleted file mode 100644 index af459ef5..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build arm64 && gc && !purego -// +build arm64,gc,!purego - -package field - -//go:noescape -func carryPropagate(v *Element) - -func (v *Element) carryPropagate() *Element { - carryPropagate(v) - return v -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s deleted file mode 100644 index 5c91e458..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build arm64 && gc && !purego -// +build arm64,gc,!purego - -#include "textflag.h" - -// carryPropagate works exactly like carryPropagateGeneric and uses the -// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but -// avoids loading R0-R4 twice and uses LDP and STP. -// -// See https://golang.org/issues/43145 for the main compiler issue. -// -// func carryPropagate(v *Element) -TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8 - MOVD v+0(FP), R20 - - LDP 0(R20), (R0, R1) - LDP 16(R20), (R2, R3) - MOVD 32(R20), R4 - - AND $0x7ffffffffffff, R0, R10 - AND $0x7ffffffffffff, R1, R11 - AND $0x7ffffffffffff, R2, R12 - AND $0x7ffffffffffff, R3, R13 - AND $0x7ffffffffffff, R4, R14 - - ADD R0>>51, R11, R11 - ADD R1>>51, R12, R12 - ADD R2>>51, R13, R13 - ADD R3>>51, R14, R14 - // R4>>51 * 19 + R10 -> R10 - LSR $51, R4, R21 - MOVD $19, R22 - MADD R22, R10, R21, R10 - - STP (R10, R11), 0(R20) - STP (R12, R13), 16(R20) - MOVD R14, 32(R20) - - RET diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go deleted file mode 100644 index 234a5b2e..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !arm64 || !gc || purego -// +build !arm64 !gc purego - -package field - -func (v *Element) carryPropagate() *Element { - return v.carryPropagateGeneric() -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go deleted file mode 100644 index 2671217d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package field - -import "math/bits" - -// uint128 holds a 128-bit number as two 64-bit limbs, for use with the -// bits.Mul64 and bits.Add64 intrinsics. -type uint128 struct { - lo, hi uint64 -} - -// mul64 returns a * b. -func mul64(a, b uint64) uint128 { - hi, lo := bits.Mul64(a, b) - return uint128{lo, hi} -} - -// addMul64 returns v + a * b. -func addMul64(v uint128, a, b uint64) uint128 { - hi, lo := bits.Mul64(a, b) - lo, c := bits.Add64(lo, v.lo, 0) - hi, _ = bits.Add64(hi, v.hi, c) - return uint128{lo, hi} -} - -// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits. -func shiftRightBy51(a uint128) uint64 { - return (a.hi << (64 - 51)) | (a.lo >> 51) -} - -func feMulGeneric(v, a, b *Element) { - a0 := a.l0 - a1 := a.l1 - a2 := a.l2 - a3 := a.l3 - a4 := a.l4 - - b0 := b.l0 - b1 := b.l1 - b2 := b.l2 - b3 := b.l3 - b4 := b.l4 - - // Limb multiplication works like pen-and-paper columnar multiplication, but - // with 51-bit limbs instead of digits. - // - // a4 a3 a2 a1 a0 x - // b4 b3 b2 b1 b0 = - // ------------------------ - // a4b0 a3b0 a2b0 a1b0 a0b0 + - // a4b1 a3b1 a2b1 a1b1 a0b1 + - // a4b2 a3b2 a2b2 a1b2 a0b2 + - // a4b3 a3b3 a2b3 a1b3 a0b3 + - // a4b4 a3b4 a2b4 a1b4 a0b4 = - // ---------------------------------------------- - // r8 r7 r6 r5 r4 r3 r2 r1 r0 - // - // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to - // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5, - // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc. - // - // Reduction can be carried out simultaneously to multiplication. For - // example, we do not compute r5: whenever the result of a multiplication - // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0. - // - // a4b0 a3b0 a2b0 a1b0 a0b0 + - // a3b1 a2b1 a1b1 a0b1 19×a4b1 + - // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 + - // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 + - // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 = - // -------------------------------------- - // r4 r3 r2 r1 r0 - // - // Finally we add up the columns into wide, overlapping limbs. - - a1_19 := a1 * 19 - a2_19 := a2 * 19 - a3_19 := a3 * 19 - a4_19 := a4 * 19 - - // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) - r0 := mul64(a0, b0) - r0 = addMul64(r0, a1_19, b4) - r0 = addMul64(r0, a2_19, b3) - r0 = addMul64(r0, a3_19, b2) - r0 = addMul64(r0, a4_19, b1) - - // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2) - r1 := mul64(a0, b1) - r1 = addMul64(r1, a1, b0) - r1 = addMul64(r1, a2_19, b4) - r1 = addMul64(r1, a3_19, b3) - r1 = addMul64(r1, a4_19, b2) - - // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3) - r2 := mul64(a0, b2) - r2 = addMul64(r2, a1, b1) - r2 = addMul64(r2, a2, b0) - r2 = addMul64(r2, a3_19, b4) - r2 = addMul64(r2, a4_19, b3) - - // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4 - r3 := mul64(a0, b3) - r3 = addMul64(r3, a1, b2) - r3 = addMul64(r3, a2, b1) - r3 = addMul64(r3, a3, b0) - r3 = addMul64(r3, a4_19, b4) - - // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 - r4 := mul64(a0, b4) - r4 = addMul64(r4, a1, b3) - r4 = addMul64(r4, a2, b2) - r4 = addMul64(r4, a3, b1) - r4 = addMul64(r4, a4, b0) - - // After the multiplication, we need to reduce (carry) the five coefficients - // to obtain a result with limbs that are at most slightly larger than 2⁵¹, - // to respect the Element invariant. - // - // Overall, the reduction works the same as carryPropagate, except with - // wider inputs: we take the carry for each coefficient by shifting it right - // by 51, and add it to the limb above it. 
The top carry is multiplied by 19 - // according to the reduction identity and added to the lowest limb. - // - // The largest coefficient (r0) will be at most 111 bits, which guarantees - // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64. - // - // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) - // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²) - // r0 < (1 + 19 × 4) × 2⁵² × 2⁵² - // r0 < 2⁷ × 2⁵² × 2⁵² - // r0 < 2¹¹¹ - // - // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most - // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and - // allows us to easily apply the reduction identity. - // - // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 - // r4 < 5 × 2⁵² × 2⁵² - // r4 < 2¹⁰⁷ - // - - c0 := shiftRightBy51(r0) - c1 := shiftRightBy51(r1) - c2 := shiftRightBy51(r2) - c3 := shiftRightBy51(r3) - c4 := shiftRightBy51(r4) - - rr0 := r0.lo&maskLow51Bits + c4*19 - rr1 := r1.lo&maskLow51Bits + c0 - rr2 := r2.lo&maskLow51Bits + c1 - rr3 := r3.lo&maskLow51Bits + c2 - rr4 := r4.lo&maskLow51Bits + c3 - - // Now all coefficients fit into 64-bit registers but are still too large to - // be passed around as a Element. We therefore do one last carry chain, - // where the carries will be small enough to fit in the wiggle room above 2⁵¹. - *v = Element{rr0, rr1, rr2, rr3, rr4} - v.carryPropagate() -} - -func feSquareGeneric(v, a *Element) { - l0 := a.l0 - l1 := a.l1 - l2 := a.l2 - l3 := a.l3 - l4 := a.l4 - - // Squaring works precisely like multiplication above, but thanks to its - // symmetry we get to group a few terms together. - // - // l4 l3 l2 l1 l0 x - // l4 l3 l2 l1 l0 = - // ------------------------ - // l4l0 l3l0 l2l0 l1l0 l0l0 + - // l4l1 l3l1 l2l1 l1l1 l0l1 + - // l4l2 l3l2 l2l2 l1l2 l0l2 + - // l4l3 l3l3 l2l3 l1l3 l0l3 + - // l4l4 l3l4 l2l4 l1l4 l0l4 = - // ---------------------------------------------- - // r8 r7 r6 r5 r4 r3 r2 r1 r0 - // - // l4l0 l3l0 l2l0 l1l0 l0l0 + - // l3l1 l2l1 l1l1 l0l1 19×l4l1 + - // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 + - // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 + - // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 = - // -------------------------------------- - // r4 r3 r2 r1 r0 - // - // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with - // only three Mul64 and four Add64, instead of five and eight. 
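The term grouping in these comments leans entirely on the reduction identity a*2^255 + b = a*19 + b (mod p). A quick math/big check of that identity, as an illustrative aside:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, so 2^255 is congruent to 19 (mod p): the identity
	// used above to fold overflowing limb products into the low limbs.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

	a := big.NewInt(123456789)
	lhs := new(big.Int).Lsh(a, 255) // a * 2^255
	lhs.Mod(lhs, p)
	rhs := new(big.Int).Mul(a, big.NewInt(19)) // a * 19
	rhs.Mod(rhs, p)
	fmt.Println(lhs.Cmp(rhs) == 0) // true
}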
- - l0_2 := l0 * 2 - l1_2 := l1 * 2 - - l1_38 := l1 * 38 - l2_38 := l2 * 38 - l3_38 := l3 * 38 - - l3_19 := l3 * 19 - l4_19 := l4 * 19 - - // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3) - r0 := mul64(l0, l0) - r0 = addMul64(r0, l1_38, l4) - r0 = addMul64(r0, l2_38, l3) - - // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3 - r1 := mul64(l0_2, l1) - r1 = addMul64(r1, l2_38, l4) - r1 = addMul64(r1, l3_19, l3) - - // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4 - r2 := mul64(l0_2, l2) - r2 = addMul64(r2, l1, l1) - r2 = addMul64(r2, l3_38, l4) - - // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4 - r3 := mul64(l0_2, l3) - r3 = addMul64(r3, l1_2, l2) - r3 = addMul64(r3, l4_19, l4) - - // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2 - r4 := mul64(l0_2, l4) - r4 = addMul64(r4, l1_2, l3) - r4 = addMul64(r4, l2, l2) - - c0 := shiftRightBy51(r0) - c1 := shiftRightBy51(r1) - c2 := shiftRightBy51(r2) - c3 := shiftRightBy51(r3) - c4 := shiftRightBy51(r4) - - rr0 := r0.lo&maskLow51Bits + c4*19 - rr1 := r1.lo&maskLow51Bits + c0 - rr2 := r2.lo&maskLow51Bits + c1 - rr3 := r3.lo&maskLow51Bits + c2 - rr4 := r4.lo&maskLow51Bits + c3 - - *v = Element{rr0, rr1, rr2, rr3, rr4} - v.carryPropagate() -} - -// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction -// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline -func (v *Element) carryPropagateGeneric() *Element { - c0 := v.l0 >> 51 - c1 := v.l1 >> 51 - c2 := v.l2 >> 51 - c3 := v.l3 >> 51 - c4 := v.l4 >> 51 - - v.l0 = v.l0&maskLow51Bits + c4*19 - v.l1 = v.l1&maskLow51Bits + c0 - v.l2 = v.l2&maskLow51Bits + c1 - v.l3 = v.l3&maskLow51Bits + c2 - v.l4 = v.l4&maskLow51Bits + c3 - - return v -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint deleted file mode 100644 index e3685f95..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint +++ /dev/null @@ -1 +0,0 @@ -b0c49ae9f59d233526f8934262c5bbbe14d4358d diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh deleted file mode 100644 index 1ba22a8b..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh +++ /dev/null @@ -1,19 +0,0 @@ -#! /bin/bash -set -euo pipefail - -cd "$(git rev-parse --show-toplevel)" - -STD_PATH=src/crypto/ed25519/internal/edwards25519/field -LOCAL_PATH=curve25519/internal/field -LAST_SYNC_REF=$(cat $LOCAL_PATH/sync.checkpoint) - -git fetch https://go.googlesource.com/go master - -if git diff --quiet $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH; then - echo "No changes." -else - NEW_REF=$(git rev-parse FETCH_HEAD | tee $LOCAL_PATH/sync.checkpoint) - echo "Applying changes from $LAST_SYNC_REF to $NEW_REF..." - git diff $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH | \ - git apply -3 --directory=$LOCAL_PATH -fi diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go deleted file mode 100644 index a7828345..00000000 --- a/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package ed25519 implements the Ed25519 signature algorithm. See -// https://ed25519.cr.yp.to/. -// -// These functions are also compatible with the “Ed25519” function defined in -// RFC 8032. However, unlike RFC 8032's formulation, this package's private key -// representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the RFC -// 8032 private key as the “seed”. -// -// Beginning with Go 1.13, the functionality of this package was moved to the -// standard library as crypto/ed25519. This package only acts as a compatibility -// wrapper. -package ed25519 - -import ( - "crypto/ed25519" - "io" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -// PublicKey is the type of Ed25519 public keys. -// -// This type is an alias for crypto/ed25519's PublicKey type. -// See the crypto/ed25519 package for the methods on this type. -type PublicKey = ed25519.PublicKey - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -// -// This type is an alias for crypto/ed25519's PrivateKey type. -// See the crypto/ed25519 package for the methods on this type. -type PrivateKey = ed25519.PrivateKey - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - return ed25519.GenerateKey(rand) -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - return ed25519.NewKeyFromSeed(seed) -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. -func Sign(privateKey PrivateKey, message []byte) []byte { - return ed25519.Sign(privateKey, message) -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. -func Verify(publicKey PublicKey, message, sig []byte) bool { - return ed25519.Verify(publicKey, message, sig) -} diff --git a/vendor/golang.org/x/crypto/internal/alias/alias.go b/vendor/golang.org/x/crypto/internal/alias/alias.go deleted file mode 100644 index 69c17f82..00000000 --- a/vendor/golang.org/x/crypto/internal/alias/alias.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !purego -// +build !purego - -// Package alias implements memory aliasing tests. -package alias - -import "unsafe" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. 
-func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && - uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. Note that x and y can -// have different lengths and still not have any inexact overlap. -// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. -func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/vendor/golang.org/x/crypto/internal/alias/alias_purego.go b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go deleted file mode 100644 index 4775b0a4..00000000 --- a/vendor/golang.org/x/crypto/internal/alias/alias_purego.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego -// +build purego - -// Package alias implements memory aliasing tests. -package alias - -// This is the Google App Engine standard variant based on reflect -// because the unsafe package and cgo are disallowed. - -import "reflect" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. -func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && - reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. Note that x and y can -// have different lengths and still not have any inexact overlap. -// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. -func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go deleted file mode 100644 index 45b5c966..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 -// +build !go1.13 - -package poly1305 - -// Generic fallbacks for the math/bits intrinsics, copied from -// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had -// variable time fallbacks until Go 1.13. 
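AnyOverlap and InexactOverlap above are the aliasing guards for the crypto packages: exact aliasing (dst and src starting at the same element) is permitted by the crypto/cipher interfaces, while shifted overlap is not. A sketch of that distinction, re-declaring the helpers locally since the vendored package is internal and cannot be imported:

package main

import (
	"fmt"
	"unsafe"
)

// anyOverlap and inexactOverlap mirror the deleted alias helpers so the
// behavior can be demonstrated outside the internal package.
func anyOverlap(x, y []byte) bool {
	return len(x) > 0 && len(y) > 0 &&
		uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
		uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
}

func inexactOverlap(x, y []byte) bool {
	if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
		return false
	}
	return anyOverlap(x, y)
}

func main() {
	buf := make([]byte, 16)
	a, b := buf[0:8], buf[0:8] // exact aliasing: allowed
	c := buf[4:12]             // shifted aliasing: rejected
	fmt.Println(inexactOverlap(a, b)) // false
	fmt.Println(inexactOverlap(a, c)) // true
}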
- -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - sum = x + y + carry - carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 - return -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - diff = x - y - borrow - borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 - return -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - const mask32 = 1<<32 - 1 - x0 := x & mask32 - x1 := x >> 32 - y0 := y & mask32 - y1 := y >> 32 - w0 := x0 * y0 - t := x1*y0 + w0>>32 - w1 := t & mask32 - w2 := t >> 32 - w1 += x0 * y1 - hi = x1*y1 + w2 + w1>>32 - lo = x * y - return -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go deleted file mode 100644 index ed52b341..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -package poly1305 - -import "math/bits" - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - return bits.Add64(x, y, carry) -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - return bits.Sub64(x, y, borrow) -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - return bits.Mul64(x, y) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go deleted file mode 100644 index f184b67d..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego -// +build !amd64,!ppc64le,!s390x !gc purego - -package poly1305 - -type mac struct{ macGeneric } diff --git a/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go b/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go deleted file mode 100644 index 4aaea810..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package poly1305 implements Poly1305 one-time message authentication code as -// specified in https://cr.yp.to/mac/poly1305-20050329.pdf. -// -// Poly1305 is a fast, one-time authentication function. It is infeasible for an -// attacker to generate an authenticator for a message without the key. However, a -// key must only be used for a single message. Authenticating two different -// messages with the same key allows an attacker to forge authenticators for other -// messages with the same key. -// -// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was -// used with a fixed key in order to generate one-time keys from an nonce. -// However, in this package AES isn't used and the one-time key is specified -// directly. -package poly1305 - -import "crypto/subtle" - -// TagSize is the size, in bytes, of a poly1305 authenticator. -const TagSize = 16 - -// Sum generates an authenticator for msg using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. 
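The one-shot API that follows is two calls: Sum to produce a tag and Verify to check it in constant time. A minimal usage sketch; it imports the deprecated public golang.org/x/crypto/poly1305 package, which exports the same Sum/Verify surface, since this internal copy cannot be imported directly (that substitution is an assumption worth flagging):

package main

import (
	"crypto/rand"
	"fmt"

	// The vendored copy above is internal; the deprecated public package
	// exposes the same API and stands in for it here.
	"golang.org/x/crypto/poly1305"
)

func main() {
	var key [32]byte
	rand.Read(key[:]) // the key must never be reused for another message

	msg := []byte("hello, world")
	var tag [16]byte
	poly1305.Sum(&tag, msg, &key)
	fmt.Println(poly1305.Verify(&tag, msg, &key)) // true
}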
-func Sum(out *[16]byte, m []byte, key *[32]byte) { - h := New(key) - h.Write(m) - h.Sum(out[:0]) -} - -// Verify returns true if mac is a valid authenticator for m with the given key. -func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { - var tmp [16]byte - Sum(&tmp, m, key) - return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 -} - -// New returns a new MAC computing an authentication -// tag of all data written to it with the given key. -// This allows writing the message progressively instead -// of passing it as a single slice. Common users should use -// the Sum function instead. -// -// The key must be unique for each message, as authenticating -// two different messages with the same key allows an attacker -// to forge messages at will. -func New(key *[32]byte) *MAC { - m := &MAC{} - initialize(key, &m.macState) - return m -} - -// MAC is an io.Writer computing an authentication tag -// of the data written to it. -// -// MAC cannot be used like common hash.Hash implementations, -// because using a poly1305 key twice breaks its security. -// Therefore writing data to a running MAC after calling -// Sum or Verify causes it to panic. -type MAC struct { - mac // platform-dependent implementation - - finalized bool -} - -// Size returns the number of bytes Sum will return. -func (h *MAC) Size() int { return TagSize } - -// Write adds more data to the running message authentication code. -// It never returns an error. -// -// It must not be called after the first call of Sum or Verify. -func (h *MAC) Write(p []byte) (n int, err error) { - if h.finalized { - panic("poly1305: write to MAC after Sum or Verify") - } - return h.mac.Write(p) -} - -// Sum computes the authenticator of all data written to the -// message authentication code. -func (h *MAC) Sum(b []byte) []byte { - var mac [TagSize]byte - h.mac.Sum(&mac) - h.finalized = true - return append(b, mac[:]...) -} - -// Verify returns whether the authenticator of all data written to -// the message authentication code matches the expected value. -func (h *MAC) Verify(expected []byte) bool { - var mac [TagSize]byte - h.mac.Sum(&mac) - h.finalized = true - return subtle.ConstantTimeCompare(expected, mac[:]) == 1 -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go deleted file mode 100644 index 6d522333..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -package poly1305 - -//go:noescape -func update(state *macState, msg []byte) - -// mac is a wrapper for macGeneric that redirects calls that would have gone to -// updateGeneric to update. -// -// Its Write and Sum methods are otherwise identical to the macGeneric ones, but -// using function pointers would carry a major performance cost. 
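To make the deleted API easier to review, here is a minimal usage sketch of the Sum/Verify/New surface documented above. The import path is internal to x/crypto, so this only compiles inside that module; external code would normally reach Poly1305 through the chacha20poly1305 AEAD:

package main

import (
	crand "crypto/rand"
	"fmt"

	"golang.org/x/crypto/internal/poly1305" // internal path: illustration only
)

func main() {
	var key [32]byte
	if _, err := crand.Read(key[:]); err != nil {
		panic(err)
	}
	msg := []byte("hello")

	// One-shot API. The key must never be reused for a second message.
	var tag [poly1305.TagSize]byte
	poly1305.Sum(&tag, msg, &key)
	fmt.Println(poly1305.Verify(&tag, msg, &key)) // true

	// Streaming API: progressive writes, then Sum appends the 16-byte tag.
	h := poly1305.New(&key)
	h.Write(msg)
	fmt.Printf("%x\n", h.Sum(nil))
}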
-type mac struct{ macGeneric } - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - update(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - update(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -func (h *mac) Sum(out *[16]byte) { - state := h.macState - if h.offset > 0 { - update(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s deleted file mode 100644 index 1d74f0f8..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -#include "textflag.h" - -#define POLY1305_ADD(msg, h0, h1, h2) \ - ADDQ 0(msg), h0; \ - ADCQ 8(msg), h1; \ - ADCQ $1, h2; \ - LEAQ 16(msg), msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ - MOVQ r0, AX; \ - MULQ h0; \ - MOVQ AX, t0; \ - MOVQ DX, t1; \ - MOVQ r0, AX; \ - MULQ h1; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ r0, t2; \ - IMULQ h2, t2; \ - ADDQ DX, t2; \ - \ - MOVQ r1, AX; \ - MULQ h0; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ DX, h0; \ - MOVQ r1, t3; \ - IMULQ h2, t3; \ - MOVQ r1, AX; \ - MULQ h1; \ - ADDQ AX, t2; \ - ADCQ DX, t3; \ - ADDQ h0, t2; \ - ADCQ $0, t3; \ - \ - MOVQ t0, h0; \ - MOVQ t1, h1; \ - MOVQ t2, h2; \ - ANDQ $3, h2; \ - MOVQ t2, t0; \ - ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ - ADDQ t0, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2; \ - SHRQ $2, t3, t2; \ - SHRQ $2, t3; \ - ADDQ t2, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2 - -// func update(state *[7]uint64, msg []byte) -TEXT ·update(SB), $0-32 - MOVQ state+0(FP), DI - MOVQ msg_base+8(FP), SI - MOVQ msg_len+16(FP), R15 - - MOVQ 0(DI), R8 // h0 - MOVQ 8(DI), R9 // h1 - MOVQ 16(DI), R10 // h2 - MOVQ 24(DI), R11 // r0 - MOVQ 32(DI), R12 // r1 - - CMPQ R15, $16 - JB bytes_between_0_and_15 - -loop: - POLY1305_ADD(SI, R8, R9, R10) - -multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) - SUBQ $16, R15 - CMPQ R15, $16 - JAE loop - -bytes_between_0_and_15: - TESTQ R15, R15 - JZ done - MOVQ $1, BX - XORQ CX, CX - XORQ R13, R13 - ADDQ R15, SI - -flush_buffer: - SHLQ $8, BX, CX - SHLQ $8, BX - MOVB -1(SI), R13 - XORQ R13, BX - DECQ SI - DECQ R15 - JNZ flush_buffer - - ADDQ BX, R8 - ADCQ CX, R9 - ADCQ $0, R10 - MOVQ $16, R15 - JMP multiply - -done: - MOVQ R8, 0(DI) - MOVQ R9, 8(DI) - MOVQ R10, 16(DI) - RET diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go deleted file mode 100644 index e041da5e..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file provides the generic implementation of Sum and MAC. Other files -// might provide optimized assembly implementations of some of this code. 
- -package poly1305 - -import "encoding/binary" - -// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag -// for a 64 bytes message is approximately -// -// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5 -// -// for some secret r and s. It can be computed sequentially like -// -// for len(msg) > 0: -// h += read(msg, 16) -// h *= r -// h %= 2¹³⁰ - 5 -// return h + s -// -// All the complexity is about doing performant constant-time math on numbers -// larger than any available numeric type. - -func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { - h := newMACGeneric(key) - h.Write(msg) - h.Sum(out) -} - -func newMACGeneric(key *[32]byte) macGeneric { - m := macGeneric{} - initialize(key, &m.macState) - return m -} - -// macState holds numbers in saturated 64-bit little-endian limbs. That is, -// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸. -type macState struct { - // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but - // can grow larger during and after rounds. It must, however, remain below - // 2 * (2¹³⁰ - 5). - h [3]uint64 - // r and s are the private key components. - r [2]uint64 - s [2]uint64 -} - -type macGeneric struct { - macState - - buffer [TagSize]byte - offset int -} - -// Write splits the incoming message into TagSize chunks, and passes them to -// update. It buffers incomplete chunks. -func (h *macGeneric) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - updateGeneric(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - updateGeneric(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -// Sum flushes the last incomplete chunk from the buffer, if any, and generates -// the MAC output. It does not modify its state, in order to allow for multiple -// calls to Sum, even if no Write is allowed after Sum. -func (h *macGeneric) Sum(out *[TagSize]byte) { - state := h.macState - if h.offset > 0 { - updateGeneric(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} - -// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It -// clears some bits of the secret coefficient to make it possible to implement -// multiplication more efficiently. -const ( - rMask0 = 0x0FFFFFFC0FFFFFFF - rMask1 = 0x0FFFFFFC0FFFFFFC -) - -// initialize loads the 256-bit key into the two 128-bit secret values r and s. -func initialize(key *[32]byte, m *macState) { - m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0 - m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1 - m.s[0] = binary.LittleEndian.Uint64(key[16:24]) - m.s[1] = binary.LittleEndian.Uint64(key[24:32]) -} - -// uint128 holds a 128-bit number as two 64-bit limbs, for use with the -// bits.Mul64 and bits.Add64 intrinsics. -type uint128 struct { - lo, hi uint64 -} - -func mul64(a, b uint64) uint128 { - hi, lo := bitsMul64(a, b) - return uint128{lo, hi} -} - -func add128(a, b uint128) uint128 { - lo, c := bitsAdd64(a.lo, b.lo, 0) - hi, c := bitsAdd64(a.hi, b.hi, c) - if c != 0 { - panic("poly1305: unexpected overflow") - } - return uint128{lo, hi} -} - -func shiftRightBy2(a uint128) uint128 { - a.lo = a.lo>>2 | (a.hi&3)<<62 - a.hi = a.hi >> 2 - return a -} - -// updateGeneric absorbs msg into the state.h accumulator. 
For each chunk m of -// 128 bits of message, it computes -// -// h₊ = (h + m) * r mod 2¹³⁰ - 5 -// -// If the msg length is not a multiple of TagSize, it assumes the last -// incomplete chunk is the final one. -func updateGeneric(state *macState, msg []byte) { - h0, h1, h2 := state.h[0], state.h[1], state.h[2] - r0, r1 := state.r[0], state.r[1] - - for len(msg) > 0 { - var c uint64 - - // For the first step, h + m, we use a chain of bits.Add64 intrinsics. - // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially - // reduced at the end of the multiplication below. - // - // The spec requires us to set a bit just above the message size, not to - // hide leading zeroes. For full chunks, that's 1 << 128, so we can just - // add 1 to the most significant (2¹²⁸) limb, h2. - if len(msg) >= TagSize { - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) - h2 += c + 1 - - msg = msg[TagSize:] - } else { - var buf [TagSize]byte - copy(buf[:], msg) - buf[len(msg)] = 1 - - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) - h2 += c - - msg = nil - } - - // Multiplication of big number limbs is similar to elementary school - // columnar multiplication. Instead of digits, there are 64-bit limbs. - // - // We are multiplying a 3 limbs number, h, by a 2 limbs number, r. - // - // h2 h1 h0 x - // r1 r0 = - // ---------------- - // h2r0 h1r0 h0r0 <-- individual 128-bit products - // + h2r1 h1r1 h0r1 - // ------------------------ - // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs - // ------------------------ - // m3.hi m2.hi m1.hi m0.hi <-- carry propagation - // + m3.lo m2.lo m1.lo m0.lo - // ------------------------------- - // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs - // - // The main difference from pen-and-paper multiplication is that we do - // carry propagation in a separate step, as if we wrote two digit sums - // at first (the 128-bit limbs), and then carried the tens all at once. - - h0r0 := mul64(h0, r0) - h1r0 := mul64(h1, r0) - h2r0 := mul64(h2, r0) - h0r1 := mul64(h0, r1) - h1r1 := mul64(h1, r1) - h2r1 := mul64(h2, r1) - - // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their - // top 4 bits cleared by rMask{0,1}, we know that their product is not going - // to overflow 64 bits, so we can ignore the high part of the products. - // - // This also means that the product doesn't have a fifth limb (t4). - if h2r0.hi != 0 { - panic("poly1305: unexpected overflow") - } - if h2r1.hi != 0 { - panic("poly1305: unexpected overflow") - } - - m0 := h0r0 - m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again - m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1. - m3 := h2r1 - - t0 := m0.lo - t1, c := bitsAdd64(m1.lo, m0.hi, 0) - t2, c := bitsAdd64(m2.lo, m1.hi, c) - t3, _ := bitsAdd64(m3.lo, m2.hi, c) - - // Now we have the result as 4 64-bit limbs, and we need to reduce it - // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do - // a cheap partial reduction according to the reduction identity - // - // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5 - // - // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is - // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the - // assumptions we make about h in the rest of the code. 
- // - // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23 - - // We split the final result at the 2¹³⁰ mark into h and cc, the carry. - // Note that the carry bits are effectively shifted left by 2, in other - // words, cc = c * 4 for the c in the reduction identity. - h0, h1, h2 = t0, t1, t2&maskLow2Bits - cc := uint128{t2 & maskNotLow2Bits, t3} - - // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. - - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) - h2 += c - - cc = shiftRightBy2(cc) - - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) - h2 += c - - // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most - // - // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1 - } - - state.h[0], state.h[1], state.h[2] = h0, h1, h2 -} - -const ( - maskLow2Bits uint64 = 0x0000000000000003 - maskNotLow2Bits uint64 = ^maskLow2Bits -) - -// select64 returns x if v == 1 and y if v == 0, in constant time. -func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y } - -// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order. -const ( - p0 = 0xFFFFFFFFFFFFFFFB - p1 = 0xFFFFFFFFFFFFFFFF - p2 = 0x0000000000000003 -) - -// finalize completes the modular reduction of h and computes -// -// out = h + s mod 2¹²⁸ -func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { - h0, h1, h2 := h[0], h[1], h[2] - - // After the partial reduction in updateGeneric, h might be more than - // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction - // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the - // result if the subtraction underflows, and t otherwise. - - hMinusP0, b := bitsSub64(h0, p0, 0) - hMinusP1, b := bitsSub64(h1, p1, b) - _, b = bitsSub64(h2, p2, b) - - // h = h if h < p else h - p - h0 = select64(b, h0, hMinusP0) - h1 = select64(b, h1, hMinusP1) - - // Finally, we compute the last Poly1305 step - // - // tag = h + s mod 2¹²⁸ - // - // by just doing a wide addition with the 128 low bits of h and discarding - // the overflow. - h0, c := bitsAdd64(h0, s[0], 0) - h1, _ = bitsAdd64(h1, s[1], c) - - binary.LittleEndian.PutUint64(out[0:8], h0) - binary.LittleEndian.PutUint64(out[8:16], h1) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go deleted file mode 100644 index 4a069941..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -package poly1305 - -//go:noescape -func update(state *macState, msg []byte) - -// mac is a wrapper for macGeneric that redirects calls that would have gone to -// updateGeneric to update. -// -// Its Write and Sum methods are otherwise identical to the macGeneric ones, but -// using function pointers would carry a major performance cost. 
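The limb arithmetic in updateGeneric and finalize above is easiest to audit against a naive big-integer model. The following cross-check sketch (not constant time; the helper name refSum is ours) follows the same schedule, h = (h + m) * r mod 2¹³⁰ - 5 per block and tag = h + s mod 2¹²⁸ at the end:

package main

import (
	"encoding/binary"
	"fmt"
	"math/big"
)

// refSum recomputes a Poly1305 tag with math/big, mirroring the comments in
// sum_generic.go. It is a readability aid, not a constant-time implementation.
func refSum(msg []byte, key *[32]byte) [16]byte {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))

	// Clamp r exactly as initialize does with rMask0 and rMask1.
	r0 := binary.LittleEndian.Uint64(key[0:8]) & 0x0FFFFFFC0FFFFFFF
	r1 := binary.LittleEndian.Uint64(key[8:16]) & 0x0FFFFFFC0FFFFFFC
	r := new(big.Int).SetUint64(r1)
	r.Lsh(r, 64).Or(r, new(big.Int).SetUint64(r0))

	h := new(big.Int)
	for len(msg) > 0 {
		n := len(msg)
		if n > 16 {
			n = 16
		}
		// Little-endian block with the mandatory 1 bit set just above it.
		block := make([]byte, n+1)
		copy(block, msg[:n])
		block[n] = 1
		for i, j := 0, len(block)-1; i < j; i, j = i+1, j-1 {
			block[i], block[j] = block[j], block[i] // big.Int wants big-endian
		}
		h.Add(h, new(big.Int).SetBytes(block))
		h.Mul(h, r).Mod(h, p)
		msg = msg[n:]
	}

	s := new(big.Int).SetUint64(binary.LittleEndian.Uint64(key[24:32]))
	s.Lsh(s, 64).Or(s, new(big.Int).SetUint64(binary.LittleEndian.Uint64(key[16:24])))
	h.Add(h, s).Mod(h, new(big.Int).Lsh(big.NewInt(1), 128)) // tag = h + s mod 2¹²⁸

	var tag [16]byte
	hb := h.Bytes()
	for i, b := range hb {
		tag[len(hb)-1-i] = b // store little-endian
	}
	return tag
}

func main() {
	var key [32]byte
	key[0] = 1 // toy key, r = 1 and s = 0: the tag is just the padded message
	fmt.Printf("%x\n", refSum([]byte("hello"), &key))
}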
-type mac struct{ macGeneric } - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - update(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - update(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -func (h *mac) Sum(out *[16]byte) { - state := h.macState - if h.offset > 0 { - update(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s deleted file mode 100644 index 58422aad..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -#include "textflag.h" - -// This was ported from the amd64 implementation. - -#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ - MOVD (msg), t0; \ - MOVD 8(msg), t1; \ - MOVD $1, t2; \ - ADDC t0, h0, h0; \ - ADDE t1, h1, h1; \ - ADDE t2, h2; \ - ADD $16, msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ - MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ - MULHDU r0, h0, t1; \ - MULHDU r0, h1, t5; \ - ADDC t4, t1, t1; \ - MULLD r0, h2, t2; \ - ADDZE t5; \ - MULHDU r1, h0, t4; \ - MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ - ADDC h0, t1, t1; \ - MULLD h2, r1, t3; \ - ADDZE t4, h0; \ - MULHDU r1, h1, t5; \ - MULLD r1, h1, t4; \ - ADDC t4, t2, t2; \ - ADDE t5, t3, t3; \ - ADDC h0, t2, t2; \ - MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ - ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ - ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ - SLD $62, t3, t4; \ - SRD $2, t2; \ - ADDZE h2; \ - OR t4, t2, t2; \ - SRD $2, t3; \ - ADDC t2, h0, h0; \ - ADDE t3, h1, h1; \ - ADDZE h2 - -DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -GLOBL ·poly1305Mask<>(SB), RODATA, $16 - -// func update(state *[7]uint64, msg []byte) -TEXT ·update(SB), $0-32 - MOVD state+0(FP), R3 - MOVD msg_base+8(FP), R4 - MOVD msg_len+16(FP), R5 - - MOVD 0(R3), R8 // h0 - MOVD 8(R3), R9 // h1 - MOVD 16(R3), R10 // h2 - MOVD 24(R3), R11 // r0 - MOVD 32(R3), R12 // r1 - - CMP R5, $16 - BLT bytes_between_0_and_15 - -loop: - POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) - -multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) - ADD $-16, R5 - CMP R5, $16 - BGE loop - -bytes_between_0_and_15: - CMP R5, $0 - BEQ done - MOVD $0, R16 // h0 - MOVD $0, R17 // h1 - -flush_buffer: - CMP R5, $8 - BLE just1 - - MOVD $8, R21 - SUB R21, R5, R21 - - // Greater than 8 -- load the rightmost remaining bytes in msg - // and put into R17 (h1) - MOVD (R4)(R21), R17 - MOVD $16, R22 - - // Find the offset to those bytes - SUB R5, R22, R22 - SLD $3, R22 - - // Shift to get only the bytes in msg - SRD R22, R17, R17 - - // Put 1 at high end - MOVD $1, R23 - SLD $3, R21 - SLD R21, R23, R23 - OR R23, R17, R17 - - // Remainder is 8 - MOVD $8, R5 - -just1: - CMP R5, $8 - BLT less8 - - // Exactly 8 - MOVD (R4), R16 - - CMP R17, $0 - - // Check if we've already set R17; if not - // set 1 to indicate end of msg. 
- BNE carry - MOVD $1, R17 - BR carry - -less8: - MOVD $0, R16 // h0 - MOVD $0, R22 // shift count - CMP R5, $4 - BLT less4 - MOVWZ (R4), R16 - ADD $4, R4 - ADD $-4, R5 - MOVD $32, R22 - -less4: - CMP R5, $2 - BLT less2 - MOVHZ (R4), R21 - SLD R22, R21, R21 - OR R16, R21, R16 - ADD $16, R22 - ADD $-2, R5 - ADD $2, R4 - -less2: - CMP R5, $0 - BEQ insert1 - MOVBZ (R4), R21 - SLD R22, R21, R21 - OR R16, R21, R16 - ADD $8, R22 - -insert1: - // Insert 1 at end of msg - MOVD $1, R21 - SLD R22, R21, R21 - OR R16, R21, R16 - -carry: - // Add new values to h0, h1, h2 - ADDC R16, R8 - ADDE R17, R9 - ADDZE R10, R10 - MOVD $16, R5 - ADD R5, R4 - BR multiply - -done: - // Save h0, h1, h2 in state - MOVD R8, 0(R3) - MOVD R9, 8(R3) - MOVD R10, 16(R3) - RET diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go deleted file mode 100644 index ec959668..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -package poly1305 - -import ( - "golang.org/x/sys/cpu" -) - -// updateVX is an assembly implementation of Poly1305 that uses vector -// instructions. It must only be called if the vector facility (vx) is -// available. -// -//go:noescape -func updateVX(state *macState, msg []byte) - -// mac is a replacement for macGeneric that uses a larger buffer and redirects -// calls that would have gone to updateGeneric to updateVX if the vector -// facility is installed. -// -// A larger buffer is required for good performance because the vector -// implementation has a higher fixed cost per call than the generic -// implementation. -type mac struct { - macState - - buffer [16 * TagSize]byte // size must be a multiple of block size (16) - offset int -} - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < len(h.buffer) { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - if cpu.S390X.HasVX { - updateVX(&h.macState, h.buffer[:]) - } else { - updateGeneric(&h.macState, h.buffer[:]) - } - } - - tail := len(p) % len(h.buffer) // number of bytes to copy into buffer - body := len(p) - tail // number of bytes to process now - if body > 0 { - if cpu.S390X.HasVX { - updateVX(&h.macState, p[:body]) - } else { - updateGeneric(&h.macState, p[:body]) - } - } - h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0 - return nn, nil -} - -func (h *mac) Sum(out *[TagSize]byte) { - state := h.macState - remainder := h.buffer[:h.offset] - - // Use the generic implementation if we have 2 or fewer blocks left - // to sum. The vector implementation has a higher startup time. - if cpu.S390X.HasVX && len(remainder) > 2*TagSize { - updateVX(&state, remainder) - } else if len(remainder) > 0 { - updateGeneric(&state, remainder) - } - finalize(out, &state.h, &state.s) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s deleted file mode 100644 index aa9e0494..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-
-//go:build gc && !purego
-// +build gc,!purego
-
-#include "textflag.h"
-
-// This implementation of Poly1305 uses the vector facility (vx)
-// to process up to 2 blocks (32 bytes) per iteration using an
-// algorithm based on the one described in:
-//
-// NEON crypto, Daniel J. Bernstein & Peter Schwabe
-// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
-//
-// This algorithm uses 5 26-bit limbs to represent a 130-bit
-// value. These limbs are, for the most part, zero extended and
-// placed into 64-bit vector register elements. Each vector
-// register is 128-bits wide and so holds 2 of these elements.
-// Using 26-bit limbs allows us plenty of headroom to accommodate
-// accumulations before and after multiplication without
-// overflowing either 32-bits (before multiplication) or 64-bits
-// (after multiplication).
-//
-// In order to parallelise the operations required to calculate
-// the sum we use two separate accumulators and then sum those
-// in an extra final step. For compatibility with the generic
-// implementation we perform this summation at the end of every
-// updateVX call.
-//
-// To use two accumulators we must multiply the message blocks
-// by r² rather than r. Only the final message block should be
-// multiplied by r.
-//
-// Example:
-//
-// We want to calculate the sum (h) for a 64 byte message (m):
-//
-//   h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r
-//
-// To do this we split the calculation into the even indices
-// and odd indices of the message. These form our SIMD 'lanes':
-//
-//   h = m[ 0:16]r⁴ + m[32:48]r² +   <- lane 0
-//       m[16:32]r³ + m[48:64]r      <- lane 1
-//
-// To calculate this iteratively we refactor so that both lanes
-// are written in terms of r² and r:
-//
-//   h = (m[ 0:16]r² + m[32:48])r² + <- lane 0
-//       (m[16:32]r² + m[48:64])r    <- lane 1
-//                 ^             ^
-//                 |             coefficients for second iteration
-//                 coefficients for first iteration
-//
-// So in this case we would have two iterations. In the first
-// both lanes are multiplied by r². In the second only the
-// first lane is multiplied by r² and the second lane is
-// instead multiplied by r. This gives us the odd and even
-// powers of r that we need from the original equation.
-//
-// Notation:
-//
-//   h - accumulator
-//   r - key
-//   m - message
-//
-//   [a, b]       - SIMD register holding two 64-bit values
-//   [a, b, c, d] - SIMD register holding four 32-bit values
-//   xᵢ[n]        - limb n of variable x with bit width i
-//
-// Limbs are expressed in little endian order, so for 26-bit
-// limbs x₂₆[4] will be the most significant limb and x₂₆[0]
-// will be the least significant limb.
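As a scalar illustration of the limb layout described above (the vectorized version is the EXPAND macro further down; split26 is an illustrative name, not a symbol in this file):

package main

import (
	"encoding/binary"
	"fmt"
)

// split26 splits a 128-bit little-endian block into five 26-bit limbs, the
// scalar analogue of what EXPAND does with vector permutes. The top limb
// only carries 24 bits here; the 1 bit appended above the message later
// lands in limb 4.
func split26(block [16]byte) [5]uint32 {
	lo := binary.LittleEndian.Uint64(block[0:8])
	hi := binary.LittleEndian.Uint64(block[8:16])
	const mask26 = 1<<26 - 1
	return [5]uint32{
		uint32(lo & mask26),                // bits 0..25
		uint32((lo >> 26) & mask26),        // bits 26..51
		uint32((lo>>52 | hi<<12) & mask26), // bits 52..77 (spans both words)
		uint32((hi >> 14) & mask26),        // bits 78..103
		uint32(hi >> 40),                   // bits 104..127 (24 bits)
	}
}

func main() {
	var block [16]byte
	block[0] = 0xff
	fmt.Println(split26(block)) // [255 0 0 0 0]
}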
- -// masking constants -#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits -#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits - -// expansion constants (see EXPAND macro) -#define EX0 V2 -#define EX1 V3 -#define EX2 V4 - -// key (r², r or 1 depending on context) -#define R_0 V5 -#define R_1 V6 -#define R_2 V7 -#define R_3 V8 -#define R_4 V9 - -// precalculated coefficients (5r², 5r or 0 depending on context) -#define R5_1 V10 -#define R5_2 V11 -#define R5_3 V12 -#define R5_4 V13 - -// message block (m) -#define M_0 V14 -#define M_1 V15 -#define M_2 V16 -#define M_3 V17 -#define M_4 V18 - -// accumulator (h) -#define H_0 V19 -#define H_1 V20 -#define H_2 V21 -#define H_3 V22 -#define H_4 V23 - -// temporary registers (for short-lived values) -#define T_0 V24 -#define T_1 V25 -#define T_2 V26 -#define T_3 V27 -#define T_4 V28 - -GLOBL ·constants<>(SB), RODATA, $0x30 -// EX0 -DATA ·constants<>+0x00(SB)/8, $0x0006050403020100 -DATA ·constants<>+0x08(SB)/8, $0x1016151413121110 -// EX1 -DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706 -DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716 -// EX2 -DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d -DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d - -// MULTIPLY multiplies each lane of f and g, partially reduced -// modulo 2¹³⁰ - 5. The result, h, consists of partial products -// in each lane that need to be reduced further to produce the -// final result. -// -// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰ -// -// Note that the multiplication by 5 of the high bits is -// achieved by precalculating the multiplication of four of the -// g coefficients by 5. These are g51-g54. -#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ - VMLOF f0, g0, h0 \ - VMLOF f0, g3, h3 \ - VMLOF f0, g1, h1 \ - VMLOF f0, g4, h4 \ - VMLOF f0, g2, h2 \ - VMLOF f1, g54, T_0 \ - VMLOF f1, g2, T_3 \ - VMLOF f1, g0, T_1 \ - VMLOF f1, g3, T_4 \ - VMLOF f1, g1, T_2 \ - VMALOF f2, g53, h0, h0 \ - VMALOF f2, g1, h3, h3 \ - VMALOF f2, g54, h1, h1 \ - VMALOF f2, g2, h4, h4 \ - VMALOF f2, g0, h2, h2 \ - VMALOF f3, g52, T_0, T_0 \ - VMALOF f3, g0, T_3, T_3 \ - VMALOF f3, g53, T_1, T_1 \ - VMALOF f3, g1, T_4, T_4 \ - VMALOF f3, g54, T_2, T_2 \ - VMALOF f4, g51, h0, h0 \ - VMALOF f4, g54, h3, h3 \ - VMALOF f4, g52, h1, h1 \ - VMALOF f4, g0, h4, h4 \ - VMALOF f4, g53, h2, h2 \ - VAG T_0, h0, h0 \ - VAG T_3, h3, h3 \ - VAG T_1, h1, h1 \ - VAG T_4, h4, h4 \ - VAG T_2, h2, h2 - -// REDUCE performs the following carry operations in four -// stages, as specified in Bernstein & Schwabe: -// -// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4] -// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0] -// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3] -// 4: h₂₆[3]->h₂₆[4] -// -// The result is that all of the limbs are limited to 26-bits -// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits. -// -// Note that although each limb is aligned at 26-bit intervals -// they may contain values that exceed 2²⁶ - 1, hence the need -// to carry the excess bits in each limb. 
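A scalar equivalent of the carry chain that the REDUCE macro below implements, again purely illustrative:

package main

import "fmt"

// reduce propagates carries between five 26-bit limbs, a scalar analogue of
// the REDUCE macro. The carry out of the top limb re-enters at the bottom
// multiplied by 5, because 2¹³⁰ ≡ 5 (mod 2¹³⁰ - 5).
func reduce(h [5]uint64) [5]uint64 {
	const mask26 = 1<<26 - 1
	// Stage 1: h[0] -> h[1], h[3] -> h[4].
	h[1] += h[0] >> 26
	h[0] &= mask26
	h[4] += h[3] >> 26
	h[3] &= mask26
	// Stage 2: h[1] -> h[2], h[4] -> h[0] (wrap-around carry times 5).
	h[2] += h[1] >> 26
	h[1] &= mask26
	h[0] += (h[4] >> 26) * 5
	h[4] &= mask26
	// Stage 3: h[0] -> h[1], h[2] -> h[3].
	h[1] += h[0] >> 26
	h[0] &= mask26
	h[3] += h[2] >> 26
	h[2] &= mask26
	// Stage 4: h[3] -> h[4].
	h[4] += h[3] >> 26
	h[3] &= mask26
	return h
}

func main() {
	fmt.Println(reduce([5]uint64{1 << 30, 0, 0, 0, 1 << 30}))
}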
-#define REDUCE(h0, h1, h2, h3, h4) \ - VESRLG $26, h0, T_0 \ - VESRLG $26, h3, T_1 \ - VN MOD26, h0, h0 \ - VN MOD26, h3, h3 \ - VAG T_0, h1, h1 \ - VAG T_1, h4, h4 \ - VESRLG $26, h1, T_2 \ - VESRLG $26, h4, T_3 \ - VN MOD26, h1, h1 \ - VN MOD26, h4, h4 \ - VESLG $2, T_3, T_4 \ - VAG T_3, T_4, T_4 \ - VAG T_2, h2, h2 \ - VAG T_4, h0, h0 \ - VESRLG $26, h2, T_0 \ - VESRLG $26, h0, T_1 \ - VN MOD26, h2, h2 \ - VN MOD26, h0, h0 \ - VAG T_0, h3, h3 \ - VAG T_1, h1, h1 \ - VESRLG $26, h3, T_2 \ - VN MOD26, h3, h3 \ - VAG T_2, h4, h4 - -// EXPAND splits the 128-bit little-endian values in0 and in1 -// into 26-bit big-endian limbs and places the results into -// the first and second lane of d₂₆[0:4] respectively. -// -// The EX0, EX1 and EX2 constants are arrays of byte indices -// for permutation. The permutation both reverses the bytes -// in the input and ensures the bytes are copied into the -// destination limb ready to be shifted into their final -// position. -#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ - VPERM in0, in1, EX0, d0 \ - VPERM in0, in1, EX1, d2 \ - VPERM in0, in1, EX2, d4 \ - VESRLG $26, d0, d1 \ - VESRLG $30, d2, d3 \ - VESRLG $4, d2, d2 \ - VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]] - VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]] - VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]] - VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]] - VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]] - -// func updateVX(state *macState, msg []byte) -TEXT ·updateVX(SB), NOSPLIT, $0 - MOVD state+0(FP), R1 - LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len - - // load EX0, EX1 and EX2 - MOVD $·constants<>(SB), R5 - VLM (R5), EX0, EX2 - - // generate masks - VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff] - VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff] - - // load h (accumulator) and r (key) from state - VZERO T_1 // [0, 0] - VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]] - VLEG $0, 16(R1), T_1 // [h₆₄[2], 0] - VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]] - VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]] - VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]] - - // unpack h and r into 26-bit limbs - // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value - VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]] - VZERO H_1 // [0, 0] - VZERO H_3 // [0, 0] - VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out - VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0] - VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]] - VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only - VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[1], r₂₆[1]] - VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only - VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete - VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete - - // replicate r across all 4 vector elements - VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]] - VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]] - VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]] - VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]] - VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]] - - // zero out lane 1 of h - VLEIG $1, $0, H_0 // [h₂₆[0], 0] - VLEIG $1, $0, H_1 // [h₂₆[1], 0] - VLEIG $1, $0, H_2 // [h₂₆[2], 0] - VLEIG $1, $0, H_3 // [h₂₆[3], 0] - VLEIG $1, $0, H_4 // [h₂₆[4], 0] - - // calculate 5r (ignore least significant limb) - VREPIF $5, T_0 - VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]] - VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]] - VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]] - VMLF T_0, R_4, R5_4 // 
[5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]] - - // skip r² calculation if we are only calculating one block - CMPBLE R3, $16, skip - - // calculate r² - MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4) - REDUCE(M_0, M_1, M_2, M_3, M_4) - VGBM $0x0f0f, T_0 - VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]] - VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]] - VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]] - VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]] - VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]] - - // calculate 5r² (ignore least significant limb) - VREPIF $5, T_0 - VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]] - VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]] - VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]] - VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]] - -loop: - CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients - - // load next 2 blocks from message - VLM (R2), T_0, T_1 - - // update message slice - SUB $32, R3 - MOVD $32(R2), R2 - - // unpack message blocks into 26-bit big-endian limbs - EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) - - // add 2¹²⁸ to each message block value - VLEIB $4, $1, M_4 - VLEIB $12, $1, M_4 - -multiply: - // accumulate the incoming message - VAG H_0, M_0, M_0 - VAG H_3, M_3, M_3 - VAG H_1, M_1, M_1 - VAG H_4, M_4, M_4 - VAG H_2, M_2, M_2 - - // multiply the accumulator by the key coefficient - MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) - - // carry and partially reduce the partial products - REDUCE(H_0, H_1, H_2, H_3, H_4) - - CMPBNE R3, $0, loop - -finish: - // sum lane 0 and lane 1 and put the result in lane 1 - VZERO T_0 - VSUMQG H_0, T_0, H_0 - VSUMQG H_3, T_0, H_3 - VSUMQG H_1, T_0, H_1 - VSUMQG H_4, T_0, H_4 - VSUMQG H_2, T_0, H_2 - - // reduce again after summation - // TODO(mundaym): there might be a more efficient way to do this - // now that we only have 1 active lane. For example, we could - // simultaneously pack the values as we reduce them. - REDUCE(H_0, H_1, H_2, H_3, H_4) - - // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1 - // TODO(mundaym): in testing this final carry was unnecessary. - // Needs a proof before it can be removed though. - VESRLG $26, H_1, T_1 - VN MOD26, H_1, H_1 - VAQ T_1, H_2, H_2 - VESRLG $26, H_2, T_2 - VN MOD26, H_2, H_2 - VAQ T_2, H_3, H_3 - VESRLG $26, H_3, T_3 - VN MOD26, H_3, H_3 - VAQ T_3, H_4, H_4 - - // h is now < 2(2¹³⁰ - 5) - // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1]. - VESLG $26, H_1, H_1 - VESLG $26, H_3, H_3 - VO H_0, H_1, H_0 - VO H_2, H_3, H_2 - VESLG $4, H_2, H_2 - VLEIB $7, $48, H_1 - VSLB H_1, H_2, H_2 - VO H_0, H_2, H_0 - VLEIB $7, $104, H_1 - VSLB H_1, H_4, H_3 - VO H_3, H_0, H_0 - VLEIB $7, $24, H_1 - VSRLB H_1, H_4, H_1 - - // update state - VSTEG $1, H_0, 0(R1) - VSTEG $0, H_0, 8(R1) - VSTEG $1, H_1, 16(R1) - RET - -b2: // 2 or fewer blocks remaining - CMPBLE R3, $16, b1 - - // Load the 2 remaining blocks (17-32 bytes remaining). - MOVD $-17(R3), R0 // index of final byte to load modulo 16 - VL (R2), T_0 // load full 16 byte block - VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes - - // The Poly1305 algorithm requires that a 1 bit be appended to - // each message block. 
If the final block is less than 16 bytes
-	// long then it is easiest to insert the 1 before the message
-	// block is split into 26-bit limbs. If, on the other hand, the
-	// final message block is 16 bytes long then we append the 1 bit
-	// after expansion as normal.
-	MOVBZ   $1, R0
-	MOVD    $-16(R3), R3          // index of byte in last block to insert 1 at (could be 16)
-	CMPBEQ  R3, $16, 2(PC)        // skip the insertion if the final block is 16 bytes long
-	VLVGB   R3, R0, T_1           // insert 1 into the byte at index R3
-
-	// Split both blocks into 26-bit limbs in the appropriate lanes.
-	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
-
-	// Append a 1 byte to the end of the second to last block.
-	VLEIB   $4, $1, M_4
-
-	// Append a 1 byte to the end of the last block only if it is a
-	// full 16 byte block.
-	CMPBNE  R3, $16, 2(PC)
-	VLEIB   $12, $1, M_4
-
-	// Finally, set up the coefficients for the final multiplication.
-	// We have previously saved r and 5r in the 32-bit even indexes
-	// of the R_[0-4] and R5_[1-4] coefficient registers.
-	//
-	// We want lane 0 to be multiplied by r² so that can be kept the
-	// same. We want lane 1 to be multiplied by r so we need to move
-	// the saved r value into the 32-bit odd index in lane 1 by
-	// rotating the 64-bit lane by 32.
-	VGBM    $0x00ff, T_0          // [0, 0xffffffffffffffff] - mask lane 1 only
-	VERIMG  $32, R_0, T_0, R_0    // [_, r²₂₆[0], _, r₂₆[0]]
-	VERIMG  $32, R_1, T_0, R_1    // [_, r²₂₆[1], _, r₂₆[1]]
-	VERIMG  $32, R_2, T_0, R_2    // [_, r²₂₆[2], _, r₂₆[2]]
-	VERIMG  $32, R_3, T_0, R_3    // [_, r²₂₆[3], _, r₂₆[3]]
-	VERIMG  $32, R_4, T_0, R_4    // [_, r²₂₆[4], _, r₂₆[4]]
-	VERIMG  $32, R5_1, T_0, R5_1  // [_, 5r²₂₆[1], _, 5r₂₆[1]]
-	VERIMG  $32, R5_2, T_0, R5_2  // [_, 5r²₂₆[2], _, 5r₂₆[2]]
-	VERIMG  $32, R5_3, T_0, R5_3  // [_, 5r²₂₆[3], _, 5r₂₆[3]]
-	VERIMG  $32, R5_4, T_0, R5_4  // [_, 5r²₂₆[4], _, 5r₂₆[4]]
-
-	MOVD    $0, R3
-	BR      multiply
-
-skip:
-	CMPBEQ  R3, $0, finish
-
-b1:  // 1 block remaining
-
-	// Load the final block (1-16 bytes). This will be placed into
-	// lane 0.
-	MOVD    $-1(R3), R0
-	VLL     R0, (R2), T_0         // pad to 16 bytes with zeros
-
-	// The Poly1305 algorithm requires that a 1 bit be appended to
-	// each message block. If the final block is less than 16 bytes
-	// long then it is easiest to insert the 1 before the message
-	// block is split into 26-bit limbs. If, on the other hand, the
-	// final message block is 16 bytes long then we append the 1 bit
-	// after expansion as normal.
-	MOVBZ   $1, R0
-	CMPBEQ  R3, $16, 2(PC)
-	VLVGB   R3, R0, T_0
-
-	// Set the message block in lane 1 to the value 0 so that it
-	// can be accumulated without affecting the final result.
-	VZERO   T_1
-
-	// Split the final message block into 26-bit limbs in lane 0.
-	// Lane 1 will contain 0.
-	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
-
-	// Append a 1 byte to the end of the last block only if it is a
-	// full 16 byte block.
-	CMPBNE  R3, $16, 2(PC)
-	VLEIB   $4, $1, M_4
-
-	// We have previously saved r and 5r in the 32-bit even indexes
-	// of the R_[0-4] and R5_[1-4] coefficient registers.
-	//
-	// We want lane 0 to be multiplied by r so we need to move the
-	// saved r value into the 32-bit odd index in lane 0. We want
-	// lane 1 to be set to the value 1. This makes multiplication
-	// a no-op. We do this by setting lane 1 in every register to 0
-	// and then just setting the 32-bit index 3 in R_0 to 1.
- VZERO T_0 - MOVD $0, R0 - MOVD $0x10111213, R12 - VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000] - VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0] - VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0] - VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0] - VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0] - VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0] - VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0] - VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0] - VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0] - VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0] - - // Set the value of lane 1 to be 1. - VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1] - - MOVD $0, R3 - BR multiply diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go deleted file mode 100644 index 8907183e..00000000 --- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is -// very similar to PEM except that it has an additional CRC checksum. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package armor // import "golang.org/x/crypto/openpgp/armor" - -import ( - "bufio" - "bytes" - "encoding/base64" - "golang.org/x/crypto/openpgp/errors" - "io" -) - -// A Block represents an OpenPGP armored structure. -// -// The encoded form is: -// -// -----BEGIN Type----- -// Headers -// -// base64-encoded Bytes -// '=' base64 encoded checksum -// -----END Type----- -// -// where Headers is a possibly empty sequence of Key: Value lines. -// -// Since the armored data can be very large, this package presents a streaming -// interface. -type Block struct { - Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). - Header map[string]string // Optional headers. - Body io.Reader // A Reader from which the contents can be read - lReader lineReader - oReader openpgpReader -} - -var ArmorCorrupt error = errors.StructuralError("armor invalid") - -const crc24Init = 0xb704ce -const crc24Poly = 0x1864cfb -const crc24Mask = 0xffffff - -// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 -func crc24(crc uint32, d []byte) uint32 { - for _, b := range d { - crc ^= uint32(b) << 16 - for i := 0; i < 8; i++ { - crc <<= 1 - if crc&0x1000000 != 0 { - crc ^= crc24Poly - } - } - } - return crc -} - -var armorStart = []byte("-----BEGIN ") -var armorEnd = []byte("-----END ") -var armorEndOfLine = []byte("-----") - -// lineReader wraps a line based reader. It watches for the end of an armor -// block and records the expected CRC value. 
-type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 - crcSet bool -} - -func (l *lineReader) Read(p []byte) (n int, err error) { - if l.eof { - return 0, io.EOF - } - - if len(l.buf) > 0 { - n = copy(p, l.buf) - l.buf = l.buf[n:] - return - } - - line, isPrefix, err := l.in.ReadLine() - if err != nil { - return - } - if isPrefix { - return 0, ArmorCorrupt - } - - if bytes.HasPrefix(line, armorEnd) { - l.eof = true - return 0, io.EOF - } - - if len(line) == 5 && line[0] == '=' { - // This is the checksum line - var expectedBytes [3]byte - var m int - m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) - if m != 3 || err != nil { - return - } - l.crc = uint32(expectedBytes[0])<<16 | - uint32(expectedBytes[1])<<8 | - uint32(expectedBytes[2]) - - line, _, err = l.in.ReadLine() - if err != nil && err != io.EOF { - return - } - if !bytes.HasPrefix(line, armorEnd) { - return 0, ArmorCorrupt - } - - l.eof = true - l.crcSet = true - return 0, io.EOF - } - - if len(line) > 96 { - return 0, ArmorCorrupt - } - - n = copy(p, line) - bytesToSave := len(line) - n - if bytesToSave > 0 { - if cap(l.buf) < bytesToSave { - l.buf = make([]byte, 0, bytesToSave) - } - l.buf = l.buf[0:bytesToSave] - copy(l.buf, line[n:]) - } - - return -} - -// openpgpReader passes Read calls to the underlying base64 decoder, but keeps -// a running CRC of the resulting data and checks the CRC against the value -// found by the lineReader at EOF. -type openpgpReader struct { - lReader *lineReader - b64Reader io.Reader - currentCRC uint32 -} - -func (r *openpgpReader) Read(p []byte) (n int, err error) { - n, err = r.b64Reader.Read(p) - r.currentCRC = crc24(r.currentCRC, p[:n]) - - if err == io.EOF && r.lReader.crcSet && r.lReader.crc != r.currentCRC&crc24Mask { - return 0, ArmorCorrupt - } - - return -} - -// Decode reads a PGP armored block from the given Reader. It will ignore -// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The -// given Reader is not usable after calling this function: an arbitrary amount -// of data may have been read past the end of the block. 
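A round-trip sketch of the armor API being deleted here (Decode is defined just below, Encode in the next file); illustrative only:

package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer

	w, err := armor.Encode(&buf, "PGP MESSAGE", map[string]string{"Comment": "example"})
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello, armor"))
	w.Close() // emits the '=' CRC24 line and the -----END----- trailer

	block, err := armor.Decode(&buf)
	if err != nil {
		panic(err)
	}
	body, err := io.ReadAll(block.Body) // the CRC is verified as Body drains
	if err != nil {
		panic(err)
	}
	fmt.Println(block.Type, string(body))
}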
-func Decode(in io.Reader) (p *Block, err error) { - r := bufio.NewReaderSize(in, 100) - var line []byte - ignoreNext := false - -TryNextBlock: - p = nil - - // Skip leading garbage - for { - ignoreThis := ignoreNext - line, ignoreNext, err = r.ReadLine() - if err != nil { - return - } - if ignoreNext || ignoreThis { - continue - } - line = bytes.TrimSpace(line) - if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { - break - } - } - - p = new(Block) - p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) - p.Header = make(map[string]string) - nextIsContinuation := false - var lastKey string - - // Read headers - for { - isContinuation := nextIsContinuation - line, nextIsContinuation, err = r.ReadLine() - if err != nil { - p = nil - return - } - if isContinuation { - p.Header[lastKey] += string(line) - continue - } - line = bytes.TrimSpace(line) - if len(line) == 0 { - break - } - - i := bytes.Index(line, []byte(": ")) - if i == -1 { - goto TryNextBlock - } - lastKey = string(line[:i]) - p.Header[lastKey] = string(line[i+2:]) - } - - p.lReader.in = r - p.oReader.currentCRC = crc24Init - p.oReader.lReader = &p.lReader - p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) - p.Body = &p.oReader - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/vendor/golang.org/x/crypto/openpgp/armor/encode.go deleted file mode 100644 index 5b6e16c1..00000000 --- a/vendor/golang.org/x/crypto/openpgp/armor/encode.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package armor - -import ( - "encoding/base64" - "io" -) - -var armorHeaderSep = []byte(": ") -var blockEnd = []byte("\n=") -var newline = []byte("\n") -var armorEndOfLineOut = []byte("-----\n") - -// writeSlices writes its arguments to the given Writer. -func writeSlices(out io.Writer, slices ...[]byte) (err error) { - for _, s := range slices { - _, err = out.Write(s) - if err != nil { - return err - } - } - return -} - -// lineBreaker breaks data across several lines, all of the same byte length -// (except possibly the last). Lines are broken with a single '\n'. -type lineBreaker struct { - lineLength int - line []byte - used int - out io.Writer - haveWritten bool -} - -func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { - return &lineBreaker{ - lineLength: lineLength, - line: make([]byte, lineLength), - used: 0, - out: out, - } -} - -func (l *lineBreaker) Write(b []byte) (n int, err error) { - n = len(b) - - if n == 0 { - return - } - - if l.used == 0 && l.haveWritten { - _, err = l.out.Write([]byte{'\n'}) - if err != nil { - return - } - } - - if l.used+len(b) < l.lineLength { - l.used += copy(l.line[l.used:], b) - return - } - - l.haveWritten = true - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - excess := l.lineLength - l.used - l.used = 0 - - _, err = l.out.Write(b[0:excess]) - if err != nil { - return - } - - _, err = l.Write(b[excess:]) - return -} - -func (l *lineBreaker) Close() (err error) { - if l.used > 0 { - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - } - - return -} - -// encoding keeps track of a running CRC24 over the data which has been written -// to it and outputs a OpenPGP checksum when closed, followed by an armor -// trailer. 
-// -// It's built into a stack of io.Writers: -// -// encoding -> base64 encoder -> lineBreaker -> out -type encoding struct { - out io.Writer - breaker *lineBreaker - b64 io.WriteCloser - crc uint32 - blockType []byte -} - -func (e *encoding) Write(data []byte) (n int, err error) { - e.crc = crc24(e.crc, data) - return e.b64.Write(data) -} - -func (e *encoding) Close() (err error) { - err = e.b64.Close() - if err != nil { - return - } - e.breaker.Close() - - var checksumBytes [3]byte - checksumBytes[0] = byte(e.crc >> 16) - checksumBytes[1] = byte(e.crc >> 8) - checksumBytes[2] = byte(e.crc) - - var b64ChecksumBytes [4]byte - base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) - - return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) -} - -// Encode returns a WriteCloser which will encode the data written to it in -// OpenPGP armor. -func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { - bType := []byte(blockType) - err = writeSlices(out, armorStart, bType, armorEndOfLineOut) - if err != nil { - return - } - - for k, v := range headers { - err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) - if err != nil { - return - } - } - - _, err = out.Write(newline) - if err != nil { - return - } - - e := &encoding{ - out: out, - breaker: newLineBreaker(out, 64), - crc: crc24Init, - blockType: bType, - } - e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) - return e, nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/canonical_text.go b/vendor/golang.org/x/crypto/openpgp/canonical_text.go deleted file mode 100644 index e601e389..00000000 --- a/vendor/golang.org/x/crypto/openpgp/canonical_text.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import "hash" - -// NewCanonicalTextHash reformats text written to it into the canonical -// form and then applies the hash h. See RFC 4880, section 5.2.1. -func NewCanonicalTextHash(h hash.Hash) hash.Hash { - return &canonicalTextHash{h, 0} -} - -type canonicalTextHash struct { - h hash.Hash - s int -} - -var newline = []byte{'\r', '\n'} - -func (cth *canonicalTextHash) Write(buf []byte) (int, error) { - start := 0 - - for i, c := range buf { - switch cth.s { - case 0: - if c == '\r' { - cth.s = 1 - } else if c == '\n' { - cth.h.Write(buf[start:i]) - cth.h.Write(newline) - start = i + 1 - } - case 1: - cth.s = 0 - } - } - - cth.h.Write(buf[start:]) - return len(buf), nil -} - -func (cth *canonicalTextHash) Sum(in []byte) []byte { - return cth.h.Sum(in) -} - -func (cth *canonicalTextHash) Reset() { - cth.h.Reset() - cth.s = 0 -} - -func (cth *canonicalTextHash) Size() int { - return cth.h.Size() -} - -func (cth *canonicalTextHash) BlockSize() int { - return cth.h.BlockSize() -} diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go deleted file mode 100644 index 743b35a1..00000000 --- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package elgamal implements ElGamal encryption, suitable for OpenPGP, -// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on -// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, -// n. 4, 1985, pp. 469-472. -// -// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it -// unsuitable for other protocols. RSA should be used in preference in any -// case. -// -// Deprecated: this package was only provided to support ElGamal encryption in -// OpenPGP. The golang.org/x/crypto/openpgp package is now deprecated (see -// https://golang.org/issue/44226), and ElGamal in the OpenPGP ecosystem has -// compatibility and security issues (see https://eprint.iacr.org/2021/923). -// Moreover, this package doesn't protect against side-channel attacks. -package elgamal // import "golang.org/x/crypto/openpgp/elgamal" - -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "io" - "math/big" -) - -// PublicKey represents an ElGamal public key. -type PublicKey struct { - G, P, Y *big.Int -} - -// PrivateKey represents an ElGamal private key. -type PrivateKey struct { - PublicKey - X *big.Int -} - -// Encrypt encrypts the given message to the given public key. The result is a -// pair of integers. Errors can result from reading random, or because msg is -// too large to be encrypted to the public key. -func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { - pLen := (pub.P.BitLen() + 7) / 8 - if len(msg) > pLen-11 { - err = errors.New("elgamal: message too long") - return - } - - // EM = 0x02 || PS || 0x00 || M - em := make([]byte, pLen-1) - em[0] = 2 - ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] - err = nonZeroRandomBytes(ps, random) - if err != nil { - return - } - em[len(em)-len(msg)-1] = 0 - copy(mm, msg) - - m := new(big.Int).SetBytes(em) - - k, err := rand.Int(random, pub.P) - if err != nil { - return - } - - c1 = new(big.Int).Exp(pub.G, k, pub.P) - s := new(big.Int).Exp(pub.Y, k, pub.P) - c2 = s.Mul(s, m) - c2.Mod(c2, pub.P) - - return -} - -// Decrypt takes two integers, resulting from an ElGamal encryption, and -// returns the plaintext of the message. An error can result only if the -// ciphertext is invalid. Users should keep in mind that this is a padding -// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can -// be used to break the cryptosystem. See “Chosen Ciphertext Attacks -// Against Protocols Based on the RSA Encryption Standard PKCS #1”, Daniel -// Bleichenbacher, Advances in Cryptology (Crypto '98), -func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { - s := new(big.Int).Exp(c1, priv.X, priv.P) - if s.ModInverse(s, priv.P) == nil { - return nil, errors.New("elgamal: invalid private key") - } - s.Mul(s, c2) - s.Mod(s, priv.P) - em := s.Bytes() - - firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) - - // The remainder of the plaintext must be a string of non-zero random - // octets, followed by a 0, followed by the message. - // lookingForIndex: 1 iff we are still looking for the zero. - // index: the offset of the first zero byte. 
- var lookingForIndex, index int - lookingForIndex = 1 - - for i := 1; i < len(em); i++ { - equals0 := subtle.ConstantTimeByteEq(em[i], 0) - index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) - lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) - } - - if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { - return nil, errors.New("elgamal: decryption error") - } - return em[index+1:], nil -} - -// nonZeroRandomBytes fills the given slice with non-zero random octets. -func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { - _, err = io.ReadFull(rand, s) - if err != nil { - return - } - - for i := 0; i < len(s); i++ { - for s[i] == 0 { - _, err = io.ReadFull(rand, s[i:i+1]) - if err != nil { - return - } - } - } - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go deleted file mode 100644 index 1d7a0ea0..00000000 --- a/vendor/golang.org/x/crypto/openpgp/errors/errors.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errors contains common error types for the OpenPGP packages. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package errors // import "golang.org/x/crypto/openpgp/errors" - -import ( - "strconv" -) - -// A StructuralError is returned when OpenPGP data is found to be syntactically -// invalid. -type StructuralError string - -func (s StructuralError) Error() string { - return "openpgp: invalid data: " + string(s) -} - -// UnsupportedError indicates that, although the OpenPGP data is valid, it -// makes use of currently unimplemented features. -type UnsupportedError string - -func (s UnsupportedError) Error() string { - return "openpgp: unsupported feature: " + string(s) -} - -// InvalidArgumentError indicates that the caller is in error and passed an -// incorrect value. -type InvalidArgumentError string - -func (i InvalidArgumentError) Error() string { - return "openpgp: invalid argument: " + string(i) -} - -// SignatureError indicates that a syntactically valid signature failed to -// validate. 
-type SignatureError string - -func (b SignatureError) Error() string { - return "openpgp: invalid signature: " + string(b) -} - -type keyIncorrectError int - -func (ki keyIncorrectError) Error() string { - return "openpgp: incorrect key" -} - -var ErrKeyIncorrect error = keyIncorrectError(0) - -type unknownIssuerError int - -func (unknownIssuerError) Error() string { - return "openpgp: signature made by unknown entity" -} - -var ErrUnknownIssuer error = unknownIssuerError(0) - -type keyRevokedError int - -func (keyRevokedError) Error() string { - return "openpgp: signature made by revoked key" -} - -var ErrKeyRevoked error = keyRevokedError(0) - -type UnknownPacketTypeError uint8 - -func (upte UnknownPacketTypeError) Error() string { - return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) -} diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go deleted file mode 100644 index d62f787e..00000000 --- a/vendor/golang.org/x/crypto/openpgp/keys.go +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto/rsa" - "io" - "time" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" -) - -// PublicKeyType is the armor type for a PGP public key. -var PublicKeyType = "PGP PUBLIC KEY BLOCK" - -// PrivateKeyType is the armor type for a PGP private key. -var PrivateKeyType = "PGP PRIVATE KEY BLOCK" - -// An Entity represents the components of an OpenPGP key: a primary public key -// (which must be a signing key), one or more identities claimed by that key, -// and zero or more subkeys, which may be encryption keys. -type Entity struct { - PrimaryKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Identities map[string]*Identity // indexed by Identity.Name - Revocations []*packet.Signature - Subkeys []Subkey -} - -// An Identity represents an identity claimed by an Entity and zero or more -// assertions by other entities about that claim. -type Identity struct { - Name string // by convention, has the form "Full Name (comment) <email@example.com>" - UserId *packet.UserId - SelfSignature *packet.Signature - Signatures []*packet.Signature -} - -// A Subkey is an additional public key in an Entity. Subkeys can be used for -// encryption. -type Subkey struct { - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Sig *packet.Signature -} - -// A Key identifies a specific public key in an Entity. This is either the -// Entity's primary key or a subkey. -type Key struct { - Entity *Entity - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - SelfSignature *packet.Signature -} - -// A KeyRing provides access to public and private keys. -type KeyRing interface { - // KeysById returns the set of keys that have the given key id. - KeysById(id uint64) []Key - // KeysByIdUsage returns the set of keys with the given id - // that also meet the key usage given by requiredUsage. - // The requiredUsage is expressed as the bitwise-OR of - // packet.KeyFlag* values. - KeysByIdUsage(id uint64, requiredUsage byte) []Key - // DecryptionKeys returns all private keys that are valid for - // decryption. - DecryptionKeys() []Key -} - -// primaryIdentity returns the Identity marked as primary or the first identity -// if none are so marked.
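// Note: e.Identities is a map, and Go randomizes map iteration order, so
// when no identity carries the IsPrimaryId flag the "first" identity chosen
// below is not deterministic across runs.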
-func (e *Entity) primaryIdentity() *Identity { - var firstIdentity *Identity - for _, ident := range e.Identities { - if firstIdentity == nil { - firstIdentity = ident - } - if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - return ident - } - } - return firstIdentity -} - -// encryptionKey returns the best candidate Key for encrypting a message to the -// given Entity. -func (e *Entity) encryptionKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - // Iterate the keys to find the newest key - var maxTime time.Time - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagEncryptCommunications && - subkey.PublicKey.PubKeyAlgo.CanEncrypt() && - !subkey.Sig.KeyExpired(now) && - (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { - candidateSubkey = i - maxTime = subkey.Sig.CreationTime - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we don't have any candidate subkeys for encryption and - // the primary key doesn't have any usage metadata then we - // assume that the primary key is ok. Or, if the primary key is - // marked as ok to encrypt to, then we can obviously use it. - i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - // This Entity appears to be signing only. - return Key{}, false -} - -// signingKey returns the best candidate Key for signing a message with this -// Entity. -func (e *Entity) signingKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagSign && - subkey.PublicKey.PubKeyAlgo.CanSign() && - !subkey.Sig.KeyExpired(now) { - candidateSubkey = i - break - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we have no candidate subkey then we assume that it's ok to sign - // with the primary key. - i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - return Key{}, false -} - -// An EntityList contains one or more Entities. -type EntityList []*Entity - -// KeysById returns the set of keys that have the given key id. -func (el EntityList) KeysById(id uint64) (keys []Key) { - for _, e := range el { - if e.PrimaryKey.KeyId == id { - var selfSig *packet.Signature - for _, ident := range e.Identities { - if selfSig == nil { - selfSig = ident.SelfSignature - } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - selfSig = ident.SelfSignature - break - } - } - keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) - } - - for _, subKey := range e.Subkeys { - if subKey.PublicKey.KeyId == id { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// KeysByIdUsage returns the set of keys with the given id that also meet -// the key usage given by requiredUsage. The requiredUsage is expressed as -// the bitwise-OR of packet.KeyFlag* values.
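// An illustrative call, with the usage mask built by ORing packet.KeyFlag*
// constants (sketch only, not part of the original file):
//
//	signers := el.KeysByIdUsage(id, packet.KeyFlagSign|packet.KeyFlagCertify)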
-func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { - for _, key := range el.KeysById(id) { - if len(key.Entity.Revocations) > 0 { - continue - } - - if key.SelfSignature.RevocationReason != nil { - continue - } - - if key.SelfSignature.FlagsValid && requiredUsage != 0 { - var usage byte - if key.SelfSignature.FlagCertify { - usage |= packet.KeyFlagCertify - } - if key.SelfSignature.FlagSign { - usage |= packet.KeyFlagSign - } - if key.SelfSignature.FlagEncryptCommunications { - usage |= packet.KeyFlagEncryptCommunications - } - if key.SelfSignature.FlagEncryptStorage { - usage |= packet.KeyFlagEncryptStorage - } - if usage&requiredUsage != requiredUsage { - continue - } - } - - keys = append(keys, key) - } - return -} - -// DecryptionKeys returns all private keys that are valid for decryption. -func (el EntityList) DecryptionKeys() (keys []Key) { - for _, e := range el { - for _, subKey := range e.Subkeys { - if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. -func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { - block, err := armor.Decode(r) - if err == io.EOF { - return nil, errors.InvalidArgumentError("no armored data found") - } - if err != nil { - return nil, err - } - if block.Type != PublicKeyType && block.Type != PrivateKeyType { - return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) - } - - return ReadKeyRing(block.Body) -} - -// ReadKeyRing reads one or more public/private keys. Unsupported keys are -// ignored as long as at least a single valid key is found. -func ReadKeyRing(r io.Reader) (el EntityList, err error) { - packets := packet.NewReader(r) - var lastUnsupportedError error - - for { - var e *Entity - e, err = ReadEntity(packets) - if err != nil { - // TODO: warn about skipped unsupported/unreadable keys - if _, ok := err.(errors.UnsupportedError); ok { - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } else if _, ok := err.(errors.StructuralError); ok { - // Skip unreadable, badly-formatted keys - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } - if err == io.EOF { - err = nil - break - } - if err != nil { - el = nil - break - } - } else { - el = append(el, e) - } - } - - if len(el) == 0 && err == nil { - err = lastUnsupportedError - } - return -} - -// readToNextPublicKey reads packets until the start of the entity and leaves -// the first packet of the new entity in the Reader. -func readToNextPublicKey(packets *packet.Reader) (err error) { - var p packet.Packet - for { - p, err = packets.Next() - if err == io.EOF { - return - } else if err != nil { - if _, ok := err.(errors.UnsupportedError); ok { - err = nil - continue - } - return - } - - if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { - packets.Unread(p) - return - } - } -} - -// ReadEntity reads an entity (public key, identities, subkeys etc) from the -// given Reader. 
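// ReadEntity is the low-level building block; a typical caller goes through
// the keyring readers above. A hypothetical sketch (illustrative path,
// error handling elided, "os" import assumed):
//
//	f, _ := os.Open("pubring.asc")
//	defer f.Close()
//	el, _ := openpgp.ReadArmoredKeyRing(f)
//	for _, e := range el {
//		_ = e.PrimaryKey.KeyId
//	}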
-func ReadEntity(packets *packet.Reader) (*Entity, error) { - e := new(Entity) - e.Identities = make(map[string]*Identity) - - p, err := packets.Next() - if err != nil { - return nil, err - } - - var ok bool - if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { - if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { - packets.Unread(p) - return nil, errors.StructuralError("first packet was not a public/private key") - } - e.PrimaryKey = &e.PrivateKey.PublicKey - } - - if !e.PrimaryKey.PubKeyAlgo.CanSign() { - return nil, errors.StructuralError("primary key cannot be used for signatures") - } - - var revocations []*packet.Signature -EachPacket: - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - - switch pkt := p.(type) { - case *packet.UserId: - if err := addUserID(e, packets, pkt); err != nil { - return nil, err - } - case *packet.Signature: - if pkt.SigType == packet.SigTypeKeyRevocation { - revocations = append(revocations, pkt) - } else if pkt.SigType == packet.SigTypeDirectSignature { - // TODO: RFC4880 5.2.1 permits signatures - // directly on keys (eg. to bind additional - // revocation keys). - } - // Else, ignoring the signature as it does not follow anything - // we would know to attach it to. - case *packet.PrivateKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, &pkt.PublicKey, pkt) - if err != nil { - return nil, err - } - case *packet.PublicKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, pkt, nil) - if err != nil { - return nil, err - } - default: - // we ignore unknown packets - } - } - - if len(e.Identities) == 0 { - return nil, errors.StructuralError("entity without any identities") - } - - for _, revocation := range revocations { - err = e.PrimaryKey.VerifyRevocationSignature(revocation) - if err == nil { - e.Revocations = append(e.Revocations, revocation) - } else { - // TODO: RFC 4880 5.2.3.15 defines revocation keys. - return nil, errors.StructuralError("revocation signature signed by alternate key") - } - } - - return e, nil -} - -func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { - // Make a new Identity object, that we might wind up throwing away. - // We'll only add it if we get a valid self-signature over this - // userID. 
- identity := new(Identity) - identity.Name = pkt.Id - identity.UserId = pkt - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return err - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId { - if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { - return errors.StructuralError("user ID self-signature invalid: " + err.Error()) - } - identity.SelfSignature = sig - e.Identities[pkt.Id] = identity - } else { - identity.Signatures = append(identity.Signatures, sig) - } - } - - return nil -} - -func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { - var subKey Subkey - subKey.PublicKey = pub - subKey.PrivateKey = priv - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { - return errors.StructuralError("subkey signature with wrong type") - } - - if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - switch sig.SigType { - case packet.SigTypeSubkeyRevocation: - subKey.Sig = sig - case packet.SigTypeSubkeyBinding: - - if shouldReplaceSubkeySig(subKey.Sig, sig) { - subKey.Sig = sig - } - } - } - - if subKey.Sig == nil { - return errors.StructuralError("subkey packet not followed by signature") - } - - e.Subkeys = append(e.Subkeys, subKey) - - return nil -} - -func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { - if potentialNewSig == nil { - return false - } - - if existingSig == nil { - return true - } - - if existingSig.SigType == packet.SigTypeSubkeyRevocation { - return false // never override a revocation signature - } - - return potentialNewSig.CreationTime.After(existingSig.CreationTime) -} - -const defaultRSAKeyBits = 2048 - -// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a -// single identity composed of the given full name, comment and email, any of -// which may be empty but must not contain any of "()<>\x00". -// If config is nil, sensible defaults will be used. 
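// A minimal usage sketch for NewEntity (hypothetical caller; a nil config
// selects the defaults described above):
//
//	e, err := openpgp.NewEntity("Alice Example", "", "alice@example.com", nil)
//	if err != nil {
//		// handle error
//	}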
-func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { - creationTime := config.Now() - - bits := defaultRSAKeyBits - if config != nil && config.RSABits != 0 { - bits = config.RSABits - } - - uid := packet.NewUserId(name, comment, email) - if uid == nil { - return nil, errors.InvalidArgumentError("user id field contained invalid characters") - } - signingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - - e := &Entity{ - PrimaryKey: packet.NewRSAPublicKey(creationTime, &signingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(creationTime, signingPriv), - Identities: make(map[string]*Identity), - } - isPrimaryId := true - e.Identities[uid.Id] = &Identity{ - Name: uid.Id, - UserId: uid, - SelfSignature: &packet.Signature{ - CreationTime: creationTime, - SigType: packet.SigTypePositiveCert, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - IsPrimaryId: &isPrimaryId, - FlagsValid: true, - FlagSign: true, - FlagCertify: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return nil, err - } - - // If the user passes in a DefaultHash via packet.Config, - // set the PreferredHash for the SelfSignature. - if config != nil && config.DefaultHash != 0 { - e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)} - } - - // Likewise for DefaultCipher. - if config != nil && config.DefaultCipher != 0 { - e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)} - } - - e.Subkeys = make([]Subkey, 1) - e.Subkeys[0] = Subkey{ - PublicKey: packet.NewRSAPublicKey(creationTime, &encryptingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(creationTime, encryptingPriv), - Sig: &packet.Signature{ - CreationTime: creationTime, - SigType: packet.SigTypeSubkeyBinding, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - FlagsValid: true, - FlagEncryptStorage: true, - FlagEncryptCommunications: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - e.Subkeys[0].PublicKey.IsSubkey = true - e.Subkeys[0].PrivateKey.IsSubkey = true - err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config) - if err != nil { - return nil, err - } - return e, nil -} - -// SerializePrivate serializes an Entity, including private key material, but -// excluding signatures from other entities, to the given Writer. -// Identities and subkeys are re-signed in case they changed since NewEntity. -// If config is nil, sensible defaults will be used.
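// An illustrative round trip through ASCII armor, using the armor package
// already imported by this file (out is any io.Writer; error handling
// elided):
//
//	w, _ := armor.Encode(out, PrivateKeyType, nil)
//	_ = e.SerializePrivate(w, nil)
//	w.Close()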
-func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { - err = e.PrivateKey.Serialize(w) - if err != nil { - return - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return - } - err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return - } - } - for _, subkey := range e.Subkeys { - err = subkey.PrivateKey.Serialize(w) - if err != nil { - return - } - err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) - if err != nil { - return - } - err = subkey.Sig.Serialize(w) - if err != nil { - return - } - } - return nil -} - -// Serialize writes the public part of the given Entity to w, including -// signatures from other entities. No private key material will be output. -func (e *Entity) Serialize(w io.Writer) error { - err := e.PrimaryKey.Serialize(w) - if err != nil { - return err - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return err - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return err - } - for _, sig := range ident.Signatures { - err = sig.Serialize(w) - if err != nil { - return err - } - } - } - for _, subkey := range e.Subkeys { - err = subkey.PublicKey.Serialize(w) - if err != nil { - return err - } - err = subkey.Sig.Serialize(w) - if err != nil { - return err - } - } - return nil -} - -// SignIdentity adds a signature to e, from signer, attesting that identity is -// associated with e. The provided identity must already be an element of -// e.Identities and the private key of signer must have been decrypted if -// necessary. -// If config is nil, sensible defaults will be used. -func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing Entity must have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing Entity's private key must be decrypted") - } - ident, ok := e.Identities[identity] - if !ok { - return errors.InvalidArgumentError("given identity string not found in Entity") - } - - sig := &packet.Signature{ - SigType: packet.SigTypeGenericCert, - PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, - Hash: config.Hash(), - CreationTime: config.Now(), - IssuerKeyId: &signer.PrivateKey.KeyId, - } - if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { - return err - } - ident.Signatures = append(ident.Signatures, sig) - return nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go deleted file mode 100644 index e8f0b5ca..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "compress/bzip2" - "compress/flate" - "compress/zlib" - "golang.org/x/crypto/openpgp/errors" - "io" - "strconv" -) - -// Compressed represents a compressed OpenPGP packet. The decompressed contents -// will contain more OpenPGP packets. See RFC 4880, section 5.6. 
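// Because the decompressed body is itself a packet stream, a reader usually
// feeds it straight back into Read; a sketch (error handling elided):
//
//	p, _ := Read(r)
//	if c, ok := p.(*Compressed); ok {
//		inner, _ := Read(c.Body) // first packet inside the compressed blob
//		_ = inner
//	}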
-type Compressed struct { - Body io.Reader -} - -const ( - NoCompression = flate.NoCompression - BestSpeed = flate.BestSpeed - BestCompression = flate.BestCompression - DefaultCompression = flate.DefaultCompression -) - -// CompressionConfig contains compressor configuration settings. -type CompressionConfig struct { - // Level is the compression level to use. It must be set to - // between -1 and 9, with -1 causing the compressor to use the - // default compression level, 0 causing the compressor to use - // no compression and 1 to 9 representing increasing (better, - // slower) compression levels. If Level is less than -1 or - // more than 9, a non-nil error will be returned during - // encryption. See the constants above for convenient common - // settings for Level. - Level int -} - -func (c *Compressed) parse(r io.Reader) error { - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - - switch buf[0] { - case 1: - c.Body = flate.NewReader(r) - case 2: - c.Body, err = zlib.NewReader(r) - case 3: - c.Body = bzip2.NewReader(r) - default: - err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) - } - - return err -} - -// compressedWriteCloser represents the serialized compression stream -// header and the compressor. Its Close() method ensures that both the -// compressor and serialized stream header are closed. Its Write() -// method writes to the compressor. -type compressedWriteCloser struct { - sh io.Closer // Stream Header - c io.WriteCloser // Compressor -} - -func (cwc compressedWriteCloser) Write(p []byte) (int, error) { - return cwc.c.Write(p) -} - -func (cwc compressedWriteCloser) Close() (err error) { - err = cwc.c.Close() - if err != nil { - return err - } - - return cwc.sh.Close() -} - -// SerializeCompressed serializes a compressed data packet to w and -// returns a WriteCloser to which the literal data packets themselves -// can be written and which MUST be closed on completion. If cc is -// nil, sensible defaults will be used to configure the compression -// algorithm. -func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { - compressed, err := serializeStreamHeader(w, packetTypeCompressed) - if err != nil { - return - } - - _, err = compressed.Write([]byte{uint8(algo)}) - if err != nil { - return - } - - level := DefaultCompression - if cc != nil { - level = cc.Level - } - - var compressor io.WriteCloser - switch algo { - case CompressionZIP: - compressor, err = flate.NewWriter(compressed, level) - case CompressionZLIB: - compressor, err = zlib.NewWriterLevel(compressed, level) - default: - s := strconv.Itoa(int(algo)) - err = errors.UnsupportedError("Unsupported compression algorithm: " + s) - } - if err != nil { - return - } - - literaldata = compressedWriteCloser{compressed, compressor} - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/config.go b/vendor/golang.org/x/crypto/openpgp/packet/config.go deleted file mode 100644 index c76eecc9..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/config.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rand" - "io" - "time" -) - -// Config collects a number of parameters along with sensible defaults. -// A nil *Config is valid and results in all default values.
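// The nil-receiver checks in the methods below are what make a nil *Config
// usable; for illustration:
//
//	var cfg *Config
//	_ = cfg.Hash()   // crypto.SHA256
//	_ = cfg.Cipher() // CipherAES128
//	_ = cfg.Now()    // time.Now()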
-type Config struct { - // Rand provides the source of entropy. - // If nil, the crypto/rand Reader is used. - Rand io.Reader - // DefaultHash is the default hash function to be used. - // If zero, SHA-256 is used. - DefaultHash crypto.Hash - // DefaultCipher is the cipher to be used. - // If zero, AES-128 is used. - DefaultCipher CipherFunction - // Time returns the current time as the number of seconds since the - // epoch. If Time is nil, time.Now is used. - Time func() time.Time - // DefaultCompressionAlgo is the compression algorithm to be - // applied to the plaintext before encryption. If zero, no - // compression is done. - DefaultCompressionAlgo CompressionAlgo - // CompressionConfig configures the compression settings. - CompressionConfig *CompressionConfig - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 65536 is used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. - S2KCount int - // RSABits is the number of bits in new RSA keys made with NewEntity. - // If zero, then 2048 bit keys are created. - RSABits int -} - -func (c *Config) Random() io.Reader { - if c == nil || c.Rand == nil { - return rand.Reader - } - return c.Rand -} - -func (c *Config) Hash() crypto.Hash { - if c == nil || uint(c.DefaultHash) == 0 { - return crypto.SHA256 - } - return c.DefaultHash -} - -func (c *Config) Cipher() CipherFunction { - if c == nil || uint8(c.DefaultCipher) == 0 { - return CipherAES128 - } - return c.DefaultCipher -} - -func (c *Config) Now() time.Time { - if c == nil || c.Time == nil { - return time.Now() - } - return c.Time() -} - -func (c *Config) Compression() CompressionAlgo { - if c == nil { - return CompressionNone - } - return c.DefaultCompressionAlgo -} - -func (c *Config) PasswordHashIterations() int { - if c == nil || c.S2KCount == 0 { - return 0 - } - return c.S2KCount -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go deleted file mode 100644 index 6d763972..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rsa" - "encoding/binary" - "io" - "math/big" - "strconv" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" -) - -const encryptedKeyVersion = 3 - -// EncryptedKey represents a public-key encrypted session key. See RFC 4880, -// section 5.1.
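// Wire layout handled by parse below (RFC 4880, section 5.1): one version
// octet (always 3 here), an eight-octet key ID, one algorithm octet, then
// one MPI for RSA or two MPIs for ElGamal.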
-type EncryptedKey struct { - KeyId uint64 - Algo PublicKeyAlgorithm - CipherFunc CipherFunction // only valid after a successful Decrypt - Key []byte // only valid after a successful Decrypt - - encryptedMPI1, encryptedMPI2 parsedMPI -} - -func (e *EncryptedKey) parse(r io.Reader) (err error) { - var buf [10]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != encryptedKeyVersion { - return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) - } - e.KeyId = binary.BigEndian.Uint64(buf[1:9]) - e.Algo = PublicKeyAlgorithm(buf[9]) - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - if err != nil { - return - } - case PubKeyAlgoElGamal: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - if err != nil { - return - } - e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r) - if err != nil { - return - } - } - _, err = consumeAll(r) - return -} - -func checksumKeyMaterial(key []byte) uint16 { - var checksum uint16 - for _, v := range key { - checksum += uint16(v) - } - return checksum -} - -// Decrypt decrypts an encrypted session key with the given private key. The -// private key must have been decrypted first. -// If config is nil, sensible defaults will be used. -func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { - var err error - var b []byte - - // TODO(agl): use session key decryption routines here to avoid - // padding oracle attacks. - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - // Supports both *rsa.PrivateKey and crypto.Decrypter - k := priv.PrivateKey.(crypto.Decrypter) - b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.bytes), nil) - case PubKeyAlgoElGamal: - c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes) - c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes) - b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) - default: - err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) - } - - if err != nil { - return err - } - - e.CipherFunc = CipherFunction(b[0]) - e.Key = b[1 : len(b)-2] - expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) - checksum := checksumKeyMaterial(e.Key) - if checksum != expectedChecksum { - return errors.StructuralError("EncryptedKey checksum incorrect") - } - - return nil -} - -// Serialize writes the encrypted key packet, e, to w.
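// (The MPI lengths computed below include the two-octet bit-count prefix
// that writeMPIs emits in front of each integer.)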
-func (e *EncryptedKey) Serialize(w io.Writer) error { - var mpiLen int - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - mpiLen = 2 + len(e.encryptedMPI1.bytes) - case PubKeyAlgoElGamal: - mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) - default: - return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) - } - - serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) - - w.Write([]byte{encryptedKeyVersion}) - binary.Write(w, binary.BigEndian, e.KeyId) - w.Write([]byte{byte(e.Algo)}) - - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - writeMPIs(w, e.encryptedMPI1) - case PubKeyAlgoElGamal: - writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) - default: - panic("internal error") - } - - return nil -} - -// SerializeEncryptedKey serializes an encrypted key packet to w that contains -// key, encrypted to pub. -// If config is nil, sensible defaults will be used. -func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { - var buf [10]byte - buf[0] = encryptedKeyVersion - binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) - buf[9] = byte(pub.PubKeyAlgo) - - keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) - keyBlock[0] = byte(cipherFunc) - copy(keyBlock[1:], key) - checksum := checksumKeyMaterial(key) - keyBlock[1+len(key)] = byte(checksum >> 8) - keyBlock[1+len(key)+1] = byte(checksum) - - switch pub.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) - case PubKeyAlgoElGamal: - return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) - case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: - return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) - } - - return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) -} - -func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { - cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - return writeMPI(w, 8*uint16(len(cipherText)), cipherText) -} - -func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { - c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ - packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 - packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - err = writeBig(w, c1) - if err != nil { - return err - } - return writeBig(w, c2) -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/literal.go b/vendor/golang.org/x/crypto/openpgp/packet/literal.go deleted file mode 
100644 index 1a9ec6e5..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/literal.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "encoding/binary" - "io" -) - -// LiteralData represents an encrypted file. See RFC 4880, section 5.9. -type LiteralData struct { - IsBinary bool - FileName string - Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. - Body io.Reader -} - -// ForEyesOnly returns whether the contents of the LiteralData have been marked -// as especially sensitive. -func (l *LiteralData) ForEyesOnly() bool { - return l.FileName == "_CONSOLE" -} - -func (l *LiteralData) parse(r io.Reader) (err error) { - var buf [256]byte - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - - l.IsBinary = buf[0] == 'b' - fileNameLen := int(buf[1]) - - _, err = readFull(r, buf[:fileNameLen]) - if err != nil { - return - } - - l.FileName = string(buf[:fileNameLen]) - - _, err = readFull(r, buf[:4]) - if err != nil { - return - } - - l.Time = binary.BigEndian.Uint32(buf[:4]) - l.Body = r - return -} - -// SerializeLiteral serializes a literal data packet to w and returns a -// WriteCloser to which the data itself can be written and which MUST be closed -// on completion. The fileName is truncated to 255 bytes. -func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { - var buf [4]byte - buf[0] = 't' - if isBinary { - buf[0] = 'b' - } - if len(fileName) > 255 { - fileName = fileName[:255] - } - buf[1] = byte(len(fileName)) - - inner, err := serializeStreamHeader(w, packetTypeLiteralData) - if err != nil { - return - } - - _, err = inner.Write(buf[:2]) - if err != nil { - return - } - _, err = inner.Write([]byte(fileName)) - if err != nil { - return - } - binary.BigEndian.PutUint32(buf[:], time) - _, err = inner.Write(buf[:]) - if err != nil { - return - } - - plaintext = inner - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go b/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go deleted file mode 100644 index ce2a33a5..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 - -package packet - -import ( - "crypto/cipher" -) - -type ocfbEncrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// An OCFBResyncOption determines if the "resynchronization step" of OCFB is -// performed. -type OCFBResyncOption bool - -const ( - OCFBResync OCFBResyncOption = true - OCFBNoResync OCFBResyncOption = false -) - -// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block, and an initial amount of -// ciphertext. randData must be random bytes and be the same length as the -// cipher.Block's block size. Resync determines if the "resynchronization step" -// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on -// this point. 
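// An illustrative setup with AES (assumes crypto/aes and crypto/rand
// imports and a caller-supplied key; error handling elided):
//
//	block, _ := aes.NewCipher(key) // key is 16, 24 or 32 bytes
//	randData := make([]byte, block.BlockSize())
//	rand.Read(randData)
//	stream, prefix := NewOCFBEncrypter(block, randData, OCFBResync)
//	// prefix (blockSize+2 bytes) is sent ahead of the ciphertext;
//	// stream then encrypts via XORKeyStream.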
-func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { - blockSize := block.BlockSize() - if len(randData) != blockSize { - return nil, nil - } - - x := &ocfbEncrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefix := make([]byte, blockSize+2) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefix[i] = randData[i] ^ x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] - prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - return x, prefix -} - -func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - x.fre[x.outUsed] ^= src[i] - dst[i] = x.fre[x.outUsed] - x.outUsed++ - } -} - -type ocfbDecrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block. Prefix must be the first -// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's -// block size. If an incorrect key is detected then nil is returned. On -// successful exit, blockSize+2 bytes of decrypted data are written into -// prefix. Resync determines if the "resynchronization step" from RFC 4880, -// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. -func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { - blockSize := block.BlockSize() - if len(prefix) != blockSize+2 { - return nil - } - - x := &ocfbDecrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefixCopy := make([]byte, len(prefix)) - copy(prefixCopy, prefix) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefixCopy[i] ^= x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefixCopy[blockSize] ^= x.fre[0] - prefixCopy[blockSize+1] ^= x.fre[1] - - if prefixCopy[blockSize-2] != prefixCopy[blockSize] || - prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { - return nil - } - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - copy(prefix, prefixCopy) - return x -} - -func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - c := src[i] - dst[i] = x.fre[x.outUsed] ^ src[i] - x.fre[x.outUsed] = c - x.outUsed++ - } -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go deleted file mode 100644 index 17135033..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" - "io" - "strconv" -) - -// OnePassSignature represents a one-pass signature packet. See RFC 4880, -// section 5.4. 
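// Wire layout parsed below: version octet (always 3), signature type, hash
// algorithm id, public-key algorithm, eight-octet key ID, and a final flag
// octet (IsLast); 13 bytes in total.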
-type OnePassSignature struct { - SigType SignatureType - Hash crypto.Hash - PubKeyAlgo PublicKeyAlgorithm - KeyId uint64 - IsLast bool -} - -const onePassSignatureVersion = 3 - -func (ops *OnePassSignature) parse(r io.Reader) (err error) { - var buf [13]byte - - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != onePassSignatureVersion { - err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) - } - - var ok bool - ops.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) - } - - ops.SigType = SignatureType(buf[1]) - ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) - ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) - ops.IsLast = buf[12] != 0 - return -} - -// Serialize marshals the given OnePassSignature to w. -func (ops *OnePassSignature) Serialize(w io.Writer) error { - var buf [13]byte - buf[0] = onePassSignatureVersion - buf[1] = uint8(ops.SigType) - var ok bool - buf[2], ok = s2k.HashToHashId(ops.Hash) - if !ok { - return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) - } - buf[3] = uint8(ops.PubKeyAlgo) - binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) - if ops.IsLast { - buf[12] = 1 - } - - if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { - return err - } - _, err := w.Write(buf[:]) - return err -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go deleted file mode 100644 index 39844773..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "io" - - "golang.org/x/crypto/openpgp/errors" -) - -// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is -// useful for splitting and storing the original packet contents separately, -// handling unsupported packet types or accessing parts of the packet not yet -// implemented by this package. -type OpaquePacket struct { - // Packet type - Tag uint8 - // Reason why the packet was parsed opaquely - Reason error - // Binary contents of the packet data - Contents []byte -} - -func (op *OpaquePacket) parse(r io.Reader) (err error) { - op.Contents, err = io.ReadAll(r) - return -} - -// Serialize marshals the packet to a writer in its original form, including -// the packet header. -func (op *OpaquePacket) Serialize(w io.Writer) (err error) { - err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) - if err == nil { - _, err = w.Write(op.Contents) - } - return -} - -// Parse attempts to parse the opaque contents into a structure supported by -// this package. If the packet is not known then the result will be another -// OpaquePacket. -func (op *OpaquePacket) Parse() (p Packet, err error) { - hdr := bytes.NewBuffer(nil) - err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) - if err != nil { - op.Reason = err - return op, err - } - p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) - if err != nil { - op.Reason = err - p = op - } - return -} - -// OpaqueReader reads OpaquePackets from an io.Reader. -type OpaqueReader struct { - r io.Reader -} - -func NewOpaqueReader(r io.Reader) *OpaqueReader { - return &OpaqueReader{r: r} -} - -// Read the next OpaquePacket. 
-func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { - tag, _, contents, err := readHeader(or.r) - if err != nil { - return - } - op = &OpaquePacket{Tag: uint8(tag), Reason: err} - err = op.parse(contents) - if err != nil { - consumeAll(contents) - } - return -} - -// OpaqueSubpacket represents an unparsed OpenPGP subpacket, -// as found in signature and user attribute packets. -type OpaqueSubpacket struct { - SubType uint8 - Contents []byte -} - -// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from -// their byte representation. -func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { - var ( - subHeaderLen int - subPacket *OpaqueSubpacket - ) - for len(contents) > 0 { - subHeaderLen, subPacket, err = nextSubpacket(contents) - if err != nil { - break - } - result = append(result, subPacket) - contents = contents[subHeaderLen+len(subPacket.Contents):] - } - return -} - -func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { - // RFC 4880, section 5.2.3.1 - var subLen uint32 - if len(contents) < 1 { - goto Truncated - } - subPacket = &OpaqueSubpacket{} - switch { - case contents[0] < 192: - subHeaderLen = 2 // 1 length byte, 1 subtype byte - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]) - contents = contents[1:] - case contents[0] < 255: - subHeaderLen = 3 // 2 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 - contents = contents[2:] - default: - subHeaderLen = 6 // 5 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[1])<<24 | - uint32(contents[2])<<16 | - uint32(contents[3])<<8 | - uint32(contents[4]) - contents = contents[5:] - } - if subLen > uint32(len(contents)) || subLen == 0 { - goto Truncated - } - subPacket.SubType = contents[0] - subPacket.Contents = contents[1:subLen] - return -Truncated: - err = errors.StructuralError("subpacket truncated") - return -} - -func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { - buf := make([]byte, 6) - n := serializeSubpacketLength(buf, len(osp.Contents)+1) - buf[n] = osp.SubType - if _, err = w.Write(buf[:n+1]); err != nil { - return - } - _, err = w.Write(osp.Contents) - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go deleted file mode 100644 index 0a19794a..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packet implements parsing and serialization of OpenPGP packets, as -// specified in RFC 4880. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. 
-package packet // import "golang.org/x/crypto/openpgp/packet" - -import ( - "bufio" - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rsa" - "io" - "math/big" - "math/bits" - - "golang.org/x/crypto/cast5" - "golang.org/x/crypto/openpgp/errors" -) - -// readFull is the same as io.ReadFull except that reading zero bytes returns -// ErrUnexpectedEOF rather than EOF. -func readFull(r io.Reader, buf []byte) (n int, err error) { - n, err = io.ReadFull(r, buf) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. -func readLength(r io.Reader) (length int64, isPartial bool, err error) { - var buf [4]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - switch { - case buf[0] < 192: - length = int64(buf[0]) - case buf[0] < 224: - length = int64(buf[0]-192) << 8 - _, err = readFull(r, buf[0:1]) - if err != nil { - return - } - length += int64(buf[0]) + 192 - case buf[0] < 255: - length = int64(1) << (buf[0] & 0x1f) - isPartial = true - default: - _, err = readFull(r, buf[0:4]) - if err != nil { - return - } - length = int64(buf[0])<<24 | - int64(buf[1])<<16 | - int64(buf[2])<<8 | - int64(buf[3]) - } - return -} - -// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. -// The continuation lengths are parsed and removed from the stream and EOF is -// returned at the end of the packet. See RFC 4880, section 4.2.2.4. -type partialLengthReader struct { - r io.Reader - remaining int64 - isPartial bool -} - -func (r *partialLengthReader) Read(p []byte) (n int, err error) { - for r.remaining == 0 { - if !r.isPartial { - return 0, io.EOF - } - r.remaining, r.isPartial, err = readLength(r.r) - if err != nil { - return 0, err - } - } - - toRead := int64(len(p)) - if toRead > r.remaining { - toRead = r.remaining - } - - n, err = r.r.Read(p[:int(toRead)]) - r.remaining -= int64(n) - if n < int(toRead) && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// partialLengthWriter writes a stream of data using OpenPGP partial lengths. -// See RFC 4880, section 4.2.2.4. -type partialLengthWriter struct { - w io.WriteCloser - lengthByte [1]byte - sentFirst bool - buf []byte -} - -// RFC 4880 4.2.2.4: the first partial length MUST be at least 512 octets long. -const minFirstPartialWrite = 512 - -func (w *partialLengthWriter) Write(p []byte) (n int, err error) { - off := 0 - if !w.sentFirst { - if len(w.buf) > 0 || len(p) < minFirstPartialWrite { - off = len(w.buf) - w.buf = append(w.buf, p...) - if len(w.buf) < minFirstPartialWrite { - return len(p), nil - } - p = w.buf - w.buf = nil - } - w.sentFirst = true - } - - power := uint8(30) - for len(p) > 0 { - l := 1 << power - if len(p) < l { - power = uint8(bits.Len32(uint32(len(p)))) - 1 - l = 1 << power - } - w.lengthByte[0] = 224 + power - _, err = w.w.Write(w.lengthByte[:]) - if err == nil { - var m int - m, err = w.w.Write(p[:l]) - n += m - } - if err != nil { - if n < off { - return 0, err - } - return n - off, err - } - p = p[l:] - } - return n - off, nil -} - -func (w *partialLengthWriter) Close() error { - if len(w.buf) > 0 { - // In this case we can't send a 512 byte packet. - // Just send what we have. 
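// (Each partial chunk is announced by a length octet of 224+power, which
// encodes a chunk of exactly 1<<power bytes for power in 0..30; the zero
// length octet written below terminates the packet with an empty final
// chunk.)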
- p := w.buf - w.sentFirst = true - w.buf = nil - if _, err := w.Write(p); err != nil { - return err - } - } - - w.lengthByte[0] = 0 - _, err := w.w.Write(w.lengthByte[:]) - if err != nil { - return err - } - return w.w.Close() -} - -// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the -// underlying Reader returns EOF before the limit has been reached. -type spanReader struct { - r io.Reader - n int64 -} - -func (l *spanReader) Read(p []byte) (n int, err error) { - if l.n <= 0 { - return 0, io.EOF - } - if int64(len(p)) > l.n { - p = p[0:l.n] - } - n, err = l.r.Read(p) - l.n -= int64(n) - if l.n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readHeader parses a packet header and returns an io.Reader which will return -// the contents of the packet. See RFC 4880, section 4.2. -func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { - var buf [4]byte - _, err = io.ReadFull(r, buf[:1]) - if err != nil { - return - } - if buf[0]&0x80 == 0 { - err = errors.StructuralError("tag byte does not have MSB set") - return - } - if buf[0]&0x40 == 0 { - // Old format packet - tag = packetType((buf[0] & 0x3f) >> 2) - lengthType := buf[0] & 3 - if lengthType == 3 { - length = -1 - contents = r - return - } - lengthBytes := 1 << lengthType - _, err = readFull(r, buf[0:lengthBytes]) - if err != nil { - return - } - for i := 0; i < lengthBytes; i++ { - length <<= 8 - length |= int64(buf[i]) - } - contents = &spanReader{r, length} - return - } - - // New format packet - tag = packetType(buf[0] & 0x3f) - length, isPartial, err := readLength(r) - if err != nil { - return - } - if isPartial { - contents = &partialLengthReader{ - remaining: length, - isPartial: true, - r: r, - } - length = -1 - } else { - contents = &spanReader{r, length} - } - return -} - -// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section -// 4.2. -func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { - var buf [6]byte - var n int - - buf[0] = 0x80 | 0x40 | byte(ptype) - if length < 192 { - buf[1] = byte(length) - n = 2 - } else if length < 8384 { - length -= 192 - buf[1] = 192 + byte(length>>8) - buf[2] = byte(length) - n = 3 - } else { - buf[1] = 255 - buf[2] = byte(length >> 24) - buf[3] = byte(length >> 16) - buf[4] = byte(length >> 8) - buf[5] = byte(length) - n = 6 - } - - _, err = w.Write(buf[:n]) - return -} - -// serializeStreamHeader writes an OpenPGP packet header to w where the -// length of the packet is unknown. It returns a io.WriteCloser which can be -// used to write the contents of the packet. See RFC 4880, section 4.2. -func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { - var buf [1]byte - buf[0] = 0x80 | 0x40 | byte(ptype) - _, err = w.Write(buf[:]) - if err != nil { - return - } - out = &partialLengthWriter{w: w} - return -} - -// Packet represents an OpenPGP packet. Users are expected to try casting -// instances of this interface to specific packet types. -type Packet interface { - parse(io.Reader) error -} - -// consumeAll reads from the given Reader until error, returning the number of -// bytes read. -func consumeAll(r io.Reader) (n int64, err error) { - var m int - var buf [1024]byte - - for { - m, err = r.Read(buf[:]) - n += int64(m) - if err == io.EOF { - err = nil - return - } - if err != nil { - return - } - } -} - -// packetType represents the numeric ids of the different OpenPGP packet types. 
See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 -type packetType uint8 - -const ( - packetTypeEncryptedKey packetType = 1 - packetTypeSignature packetType = 2 - packetTypeSymmetricKeyEncrypted packetType = 3 - packetTypeOnePassSignature packetType = 4 - packetTypePrivateKey packetType = 5 - packetTypePublicKey packetType = 6 - packetTypePrivateSubkey packetType = 7 - packetTypeCompressed packetType = 8 - packetTypeSymmetricallyEncrypted packetType = 9 - packetTypeLiteralData packetType = 11 - packetTypeUserId packetType = 13 - packetTypePublicSubkey packetType = 14 - packetTypeUserAttribute packetType = 17 - packetTypeSymmetricallyEncryptedMDC packetType = 18 -) - -// peekVersion detects the version of a public key packet about to -// be read. A bufio.Reader at the original position of the io.Reader -// is returned. -func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { - bufr = bufio.NewReader(r) - var verBuf []byte - if verBuf, err = bufr.Peek(1); err != nil { - return - } - ver = verBuf[0] - return -} - -// Read reads a single OpenPGP packet from the given io.Reader. If there is an -// error parsing a packet, the whole packet is consumed from the input. -func Read(r io.Reader) (p Packet, err error) { - tag, _, contents, err := readHeader(r) - if err != nil { - return - } - - switch tag { - case packetTypeEncryptedKey: - p = new(EncryptedKey) - case packetTypeSignature: - var version byte - // Detect signature version - if contents, version, err = peekVersion(contents); err != nil { - return - } - if version < 4 { - p = new(SignatureV3) - } else { - p = new(Signature) - } - case packetTypeSymmetricKeyEncrypted: - p = new(SymmetricKeyEncrypted) - case packetTypeOnePassSignature: - p = new(OnePassSignature) - case packetTypePrivateKey, packetTypePrivateSubkey: - pk := new(PrivateKey) - if tag == packetTypePrivateSubkey { - pk.IsSubkey = true - } - p = pk - case packetTypePublicKey, packetTypePublicSubkey: - var version byte - if contents, version, err = peekVersion(contents); err != nil { - return - } - isSubkey := tag == packetTypePublicSubkey - if version < 4 { - p = &PublicKeyV3{IsSubkey: isSubkey} - } else { - p = &PublicKey{IsSubkey: isSubkey} - } - case packetTypeCompressed: - p = new(Compressed) - case packetTypeSymmetricallyEncrypted: - p = new(SymmetricallyEncrypted) - case packetTypeLiteralData: - p = new(LiteralData) - case packetTypeUserId: - p = new(UserId) - case packetTypeUserAttribute: - p = new(UserAttribute) - case packetTypeSymmetricallyEncryptedMDC: - se := new(SymmetricallyEncrypted) - se.MDC = true - p = se - default: - err = errors.UnknownPacketTypeError(tag) - } - if p != nil { - err = p.parse(contents) - } - if err != nil { - consumeAll(contents) - } - return -} - -// SignatureType represents the different semantic meanings of an OpenPGP -// signature. See RFC 4880, section 5.2.1. -type SignatureType uint8 - -const ( - SigTypeBinary SignatureType = 0 - SigTypeText = 1 - SigTypeGenericCert = 0x10 - SigTypePersonaCert = 0x11 - SigTypeCasualCert = 0x12 - SigTypePositiveCert = 0x13 - SigTypeSubkeyBinding = 0x18 - SigTypePrimaryKeyBinding = 0x19 - SigTypeDirectSignature = 0x1F - SigTypeKeyRevocation = 0x20 - SigTypeSubkeyRevocation = 0x28 -) - -// PublicKeyAlgorithm represents the different public key system specified for -// OpenPGP. 
See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 -type PublicKeyAlgorithm uint8 - -const ( - PubKeyAlgoRSA PublicKeyAlgorithm = 1 - PubKeyAlgoElGamal PublicKeyAlgorithm = 16 - PubKeyAlgoDSA PublicKeyAlgorithm = 17 - // RFC 6637, Section 5. - PubKeyAlgoECDH PublicKeyAlgorithm = 18 - PubKeyAlgoECDSA PublicKeyAlgorithm = 19 - - // Deprecated in RFC 4880, Section 13.5. Use key flags instead. - PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 - PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 -) - -// CanEncrypt returns true if it's possible to encrypt a message to a public -// key of the given type. -func (pka PublicKeyAlgorithm) CanEncrypt() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal: - return true - } - return false -} - -// CanSign returns true if it's possible for a public key of the given type to -// sign a message. -func (pka PublicKeyAlgorithm) CanSign() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - return true - } - return false -} - -// CipherFunction represents the different block ciphers specified for OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 -type CipherFunction uint8 - -const ( - Cipher3DES CipherFunction = 2 - CipherCAST5 CipherFunction = 3 - CipherAES128 CipherFunction = 7 - CipherAES192 CipherFunction = 8 - CipherAES256 CipherFunction = 9 -) - -// KeySize returns the key size, in bytes, of cipher. -func (cipher CipherFunction) KeySize() int { - switch cipher { - case Cipher3DES: - return 24 - case CipherCAST5: - return cast5.KeySize - case CipherAES128: - return 16 - case CipherAES192: - return 24 - case CipherAES256: - return 32 - } - return 0 -} - -// blockSize returns the block size, in bytes, of cipher. -func (cipher CipherFunction) blockSize() int { - switch cipher { - case Cipher3DES: - return des.BlockSize - case CipherCAST5: - return 8 - case CipherAES128, CipherAES192, CipherAES256: - return 16 - } - return 0 -} - -// new returns a fresh instance of the given cipher. -func (cipher CipherFunction) new(key []byte) (block cipher.Block) { - switch cipher { - case Cipher3DES: - block, _ = des.NewTripleDESCipher(key) - case CipherCAST5: - block, _ = cast5.NewCipher(key) - case CipherAES128, CipherAES192, CipherAES256: - block, _ = aes.NewCipher(key) - } - return -} - -// readMPI reads a big integer from r. The bit length returned is the bit -// length that was specified in r. This is preserved so that the integer can be -// reserialized exactly. -func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { - var buf [2]byte - _, err = readFull(r, buf[0:]) - if err != nil { - return - } - bitLength = uint16(buf[0])<<8 | uint16(buf[1]) - numBytes := (int(bitLength) + 7) / 8 - mpi = make([]byte, numBytes) - _, err = readFull(r, mpi) - // According to RFC 4880 3.2. we should check that the MPI has no leading - // zeroes (at least when not an encrypted MPI?), but this implementation - // does generate leading zeroes, so we keep accepting them. - return -} - -// writeMPI serializes a big integer to w. -func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { - // Note that we can produce leading zeroes, in violation of RFC 4880 3.2. - // Implementations seem to be tolerant of them, and stripping them would - // make it complex to guarantee matching re-serialization. 
- _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) - if err == nil { - _, err = w.Write(mpiBytes) - } - return -} - -// writeBig serializes a *big.Int to w. -func writeBig(w io.Writer, i *big.Int) error { - return writeMPI(w, uint16(i.BitLen()), i.Bytes()) -} - -// padToKeySize left-pads an MPI with zeroes to match the length of the -// specified RSA public key. -func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { - k := (pub.N.BitLen() + 7) / 8 - if len(b) >= k { - return b - } - bb := make([]byte, k) - copy(bb[len(bb)-len(b):], b) - return bb -} - -// CompressionAlgo represents the different compression algorithms -// supported by OpenPGP (except for BZIP2, which is not currently -// supported). See Section 9.3 of RFC 4880. -type CompressionAlgo uint8 - -const ( - CompressionNone CompressionAlgo = 0 - CompressionZIP CompressionAlgo = 1 - CompressionZLIB CompressionAlgo = 2 -) diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go deleted file mode 100644 index 192aac37..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "crypto/sha1" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// PrivateKey represents a possibly encrypted private key. See RFC 4880, -// section 5.5.3. -type PrivateKey struct { - PublicKey - Encrypted bool // if true then the private key is unavailable until Decrypt has been called. - encryptedData []byte - cipher CipherFunction - s2k func(out, in []byte) - PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or crypto.Signer/crypto.Decrypter (Decrypter, RSA only). - sha1Checksum bool - iv []byte -} - -func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that -// implements RSA or ECDSA. -func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey { - pk := new(PrivateKey) - // In general, the public keys should be used as pointers. We still - // type-switch on the values, for backwards-compatibility.
- switch pubkey := signer.Public().(type) { - case *rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey) - case rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey) - case *ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey) - case ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey) - default: - panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") - } - pk.PrivateKey = signer - return pk -} - -func (pk *PrivateKey) parse(r io.Reader) (err error) { - err = (&pk.PublicKey).parse(r) - if err != nil { - return - } - var buf [1]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - - s2kType := buf[0] - - switch s2kType { - case 0: - pk.s2k = nil - pk.Encrypted = false - case 254, 255: - _, err = readFull(r, buf[:]) - if err != nil { - return - } - pk.cipher = CipherFunction(buf[0]) - pk.Encrypted = true - pk.s2k, err = s2k.Parse(r) - if err != nil { - return - } - if s2kType == 254 { - pk.sha1Checksum = true - } - default: - return errors.UnsupportedError("deprecated s2k function in private key") - } - - if pk.Encrypted { - blockSize := pk.cipher.blockSize() - if blockSize == 0 { - return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) - } - pk.iv = make([]byte, blockSize) - _, err = readFull(r, pk.iv) - if err != nil { - return - } - } - - pk.encryptedData, err = io.ReadAll(r) - if err != nil { - return - } - - if !pk.Encrypted { - return pk.parsePrivateKey(pk.encryptedData) - } - - return -} - -func mod64kHash(d []byte) uint16 { - var h uint16 - for _, b := range d { - h += uint16(b) - } - return h -} - -func (pk *PrivateKey) Serialize(w io.Writer) (err error) { - // TODO(agl): support encrypted private keys - buf := bytes.NewBuffer(nil) - err = pk.PublicKey.serializeWithoutHeaders(buf) - if err != nil { - return - } - buf.WriteByte(0 /* no encryption */) - - privateKeyBuf := bytes.NewBuffer(nil) - - switch priv := pk.PrivateKey.(type) { - case *rsa.PrivateKey: - err = serializeRSAPrivateKey(privateKeyBuf, priv) - case *dsa.PrivateKey: - err = serializeDSAPrivateKey(privateKeyBuf, priv) - case *elgamal.PrivateKey: - err = serializeElGamalPrivateKey(privateKeyBuf, priv) - case *ecdsa.PrivateKey: - err = serializeECDSAPrivateKey(privateKeyBuf, priv) - default: - err = errors.InvalidArgumentError("unknown private key type") - } - if err != nil { - return - } - - ptype := packetTypePrivateKey - contents := buf.Bytes() - privateKeyBytes := privateKeyBuf.Bytes() - if pk.IsSubkey { - ptype = packetTypePrivateSubkey - } - err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2) - if err != nil { - return - } - _, err = w.Write(contents) - if err != nil { - return - } - _, err = w.Write(privateKeyBytes) - if err != nil { - return - } - - checksum := mod64kHash(privateKeyBytes) - var checksumBytes [2]byte - checksumBytes[0] = byte(checksum >> 8) - checksumBytes[1] = byte(checksum) - _, err = w.Write(checksumBytes[:]) - - return -} - -func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { - err := writeBig(w, priv.D) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[1]) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[0]) - if err != nil { - return err - } - return writeBig(w, priv.Precomputed.Qinv) -} - -func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { - return writeBig(w, priv.X) -} - -func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) 
error { - return writeBig(w, priv.X) -} - -func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { - return writeBig(w, priv.D) -} - -// Decrypt decrypts an encrypted private key using a passphrase. -func (pk *PrivateKey) Decrypt(passphrase []byte) error { - if !pk.Encrypted { - return nil - } - - key := make([]byte, pk.cipher.KeySize()) - pk.s2k(key, passphrase) - block := pk.cipher.new(key) - cfb := cipher.NewCFBDecrypter(block, pk.iv) - - data := make([]byte, len(pk.encryptedData)) - cfb.XORKeyStream(data, pk.encryptedData) - - if pk.sha1Checksum { - if len(data) < sha1.Size { - return errors.StructuralError("truncated private key data") - } - h := sha1.New() - h.Write(data[:len(data)-sha1.Size]) - sum := h.Sum(nil) - if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-sha1.Size] - } else { - if len(data) < 2 { - return errors.StructuralError("truncated private key data") - } - var sum uint16 - for i := 0; i < len(data)-2; i++ { - sum += uint16(data[i]) - } - if data[len(data)-2] != uint8(sum>>8) || - data[len(data)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-2] - } - - return pk.parsePrivateKey(data) -} - -func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { - switch pk.PublicKey.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: - return pk.parseRSAPrivateKey(data) - case PubKeyAlgoDSA: - return pk.parseDSAPrivateKey(data) - case PubKeyAlgoElGamal: - return pk.parseElGamalPrivateKey(data) - case PubKeyAlgoECDSA: - return pk.parseECDSAPrivateKey(data) - } - panic("impossible") -} - -func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { - rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) - rsaPriv := new(rsa.PrivateKey) - rsaPriv.PublicKey = *rsaPub - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - p, _, err := readMPI(buf) - if err != nil { - return - } - q, _, err := readMPI(buf) - if err != nil { - return - } - - rsaPriv.D = new(big.Int).SetBytes(d) - rsaPriv.Primes = make([]*big.Int, 2) - rsaPriv.Primes[0] = new(big.Int).SetBytes(p) - rsaPriv.Primes[1] = new(big.Int).SetBytes(q) - if err := rsaPriv.Validate(); err != nil { - return err - } - rsaPriv.Precompute() - pk.PrivateKey = rsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { - dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) - dsaPriv := new(dsa.PrivateKey) - dsaPriv.PublicKey = *dsaPub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - dsaPriv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = dsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { - pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) - priv := new(elgamal.PrivateKey) - priv.PublicKey = *pub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - priv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = priv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { - ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - - pk.PrivateKey = 
&ecdsa.PrivateKey{ - PublicKey: *ecdsaPub, - D: new(big.Int).SetBytes(d), - } - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go deleted file mode 100644 index fcd5f525..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go +++ /dev/null @@ -1,753 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" -) - -var ( - // NIST curve P-256 - oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} - // NIST curve P-384 - oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} - // NIST curve P-521 - oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} -) - -const maxOIDLength = 8 - -// ecdsaKey stores the algorithm-specific fields for ECDSA keys. -// as defined in RFC 6637, Section 9. -type ecdsaKey struct { - // oid contains the OID byte sequence identifying the elliptic curve used - oid []byte - // p contains the elliptic curve point that represents the public key - p parsedMPI -} - -// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. -func parseOID(r io.Reader) (oid []byte, err error) { - buf := make([]byte, maxOIDLength) - if _, err = readFull(r, buf[:1]); err != nil { - return - } - oidLen := buf[0] - if int(oidLen) > len(buf) { - err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) - return - } - oid = buf[:oidLen] - _, err = readFull(r, oid) - return -} - -func (f *ecdsaKey) parse(r io.Reader) (err error) { - if f.oid, err = parseOID(r); err != nil { - return err - } - f.p.bytes, f.p.bitLength, err = readMPI(r) - return -} - -func (f *ecdsaKey) serialize(w io.Writer) (err error) { - buf := make([]byte, maxOIDLength+1) - buf[0] = byte(len(f.oid)) - copy(buf[1:], f.oid) - if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { - return - } - return writeMPIs(w, f.p) -} - -func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { - var c elliptic.Curve - if bytes.Equal(f.oid, oidCurveP256) { - c = elliptic.P256() - } else if bytes.Equal(f.oid, oidCurveP384) { - c = elliptic.P384() - } else if bytes.Equal(f.oid, oidCurveP521) { - c = elliptic.P521() - } else { - return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) - } - x, y := elliptic.Unmarshal(c, f.p.bytes) - if x == nil { - return nil, errors.UnsupportedError("failed to parse EC point") - } - return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil -} - -func (f *ecdsaKey) byteLen() int { - return 1 + len(f.oid) + 2 + len(f.p.bytes) -} - -type kdfHashFunction byte -type kdfAlgorithm byte - -// ecdhKdf stores key derivation function parameters -// used for ECDH encryption. See RFC 6637, Section 9. 
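The OID handling above is compact enough to restate standalone. The sketch below is editorial, not part of the vendored file: it mirrors the lookup in ecdsaKey.newECDSA, assuming the one-byte length prefix read by parseOID has already been stripped, leaving the raw OID bytes from RFC 6637, section 11.

    package main

    import (
        "bytes"
        "crypto/elliptic"
        "fmt"
    )

    // curveForOID maps a raw OpenPGP curve OID to a Go curve, returning
    // false for any OID the vendored package would reject as unsupported.
    func curveForOID(oid []byte) (elliptic.Curve, bool) {
        switch {
        case bytes.Equal(oid, []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}):
            return elliptic.P256(), true
        case bytes.Equal(oid, []byte{0x2B, 0x81, 0x04, 0x00, 0x22}):
            return elliptic.P384(), true
        case bytes.Equal(oid, []byte{0x2B, 0x81, 0x04, 0x00, 0x23}):
            return elliptic.P521(), true
        }
        return nil, false
    }

    func main() {
        c, ok := curveForOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23})
        fmt.Println(ok, c.Params().Name) // true P-521
    }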
-type ecdhKdf struct { - KdfHash kdfHashFunction - KdfAlgo kdfAlgorithm -} - -func (f *ecdhKdf) parse(r io.Reader) (err error) { - buf := make([]byte, 1) - if _, err = readFull(r, buf); err != nil { - return - } - kdfLen := int(buf[0]) - if kdfLen < 3 { - return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) - } - buf = make([]byte, kdfLen) - if _, err = readFull(r, buf); err != nil { - return - } - reserved := int(buf[0]) - f.KdfHash = kdfHashFunction(buf[1]) - f.KdfAlgo = kdfAlgorithm(buf[2]) - if reserved != 0x01 { - return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) - } - return -} - -func (f *ecdhKdf) serialize(w io.Writer) (err error) { - buf := make([]byte, 4) - // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. - buf[0] = byte(0x03) // Length of the following fields - buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now - buf[2] = byte(f.KdfHash) - buf[3] = byte(f.KdfAlgo) - _, err = w.Write(buf[:]) - return -} - -func (f *ecdhKdf) byteLen() int { - return 4 -} - -// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. -type PublicKey struct { - CreationTime time.Time - PubKeyAlgo PublicKeyAlgorithm - PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey - Fingerprint [20]byte - KeyId uint64 - IsSubkey bool - - n, e, p, q, g, y parsedMPI - - // RFC 6637 fields - ec *ecdsaKey - ecdh *ecdhKdf -} - -// signingKey provides a convenient abstraction over signature verification -// for v3 and v4 public keys. -type signingKey interface { - SerializeSignaturePrefix(io.Writer) - serializeWithoutHeaders(io.Writer) error -} - -func fromBig(n *big.Int) parsedMPI { - return parsedMPI{ - bytes: n.Bytes(), - bitLength: uint16(n.BitLen()), - } -} - -// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. -func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoRSA, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. -func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoDSA, - PublicKey: pub, - p: fromBig(pub.P), - q: fromBig(pub.Q), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
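Because NewRSAPublicKey and Serialize above are exported, building a key packet end to end takes only a few lines. A minimal sketch, assuming the pre-removal vendored copy of golang.org/x/crypto/openpgp is still importable:

    package main

    import (
        "bytes"
        "crypto/rand"
        "crypto/rsa"
        "fmt"
        "time"

        "golang.org/x/crypto/openpgp/packet"
    )

    func main() {
        key, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            panic(err)
        }
        // Wrap the Go key; the constructor also computes the fingerprint
        // and key ID as described below.
        pk := packet.NewRSAPublicKey(time.Now(), &key.PublicKey)

        var buf bytes.Buffer
        if err := pk.Serialize(&buf); err != nil {
            panic(err)
        }
        fmt.Printf("key id %s, %d byte packet\n", pk.KeyIdString(), buf.Len())
    }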
-func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoElGamal, - PublicKey: pub, - p: fromBig(pub.P), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDSA, - PublicKey: pub, - ec: new(ecdsaKey), - } - - switch pub.Curve { - case elliptic.P256(): - pk.ec.oid = oidCurveP256 - case elliptic.P384(): - pk.ec.oid = oidCurveP384 - case elliptic.P521(): - pk.ec.oid = oidCurveP521 - default: - panic("unknown elliptic curve") - } - - pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) - - // The bit length is 3 (for the 0x04 specifying an uncompressed key) - // plus two field elements (for x and y), which are rounded up to the - // nearest byte. See https://tools.ietf.org/html/rfc6637#section-6 - fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7 - pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes) - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKey) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [6]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != 4 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - case PubKeyAlgoDSA: - err = pk.parseDSA(r) - case PubKeyAlgoElGamal: - err = pk.parseElGamal(r) - case PubKeyAlgoECDSA: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return err - } - pk.PublicKey, err = pk.ec.newECDSA() - case PubKeyAlgoECDH: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return - } - pk.ecdh = new(ecdhKdf) - if err = pk.ecdh.parse(r); err != nil { - return - } - // The ECDH key is stored in an ecdsa.PublicKey for convenience. - pk.PublicKey, err = pk.ec.newECDSA() - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKey) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := sha1.New() - pk.SerializeSignaturePrefix(fingerPrint) - pk.serializeWithoutHeaders(fingerPrint) - copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) - pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKey) parseRSA(r io.Reader) (err error) { - pk.n.bytes, pk.n.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.e.bytes, pk.e.bitLength, err = readMPI(r) - if err != nil { - return - } - - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{ - N: new(big.Int).SetBytes(pk.n.bytes), - E: 0, - } - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// parseDSA parses DSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. 
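setFingerPrintAndKeyId above is the whole story of V4 key IDs, and it is easy to restate without the package: the fingerprint is a SHA-1 over a 0x99 framing byte, a two-byte body length, and the serialized key body, and the key ID is the low 64 bits of that hash (RFC 4880, section 12.2). In this editorial sketch the body bytes are placeholders, not a real key:

    package main

    import (
        "crypto/sha1"
        "encoding/binary"
        "fmt"
    )

    // v4Fingerprint mirrors setFingerPrintAndKeyId above.
    func v4Fingerprint(body []byte) ([20]byte, uint64) {
        h := sha1.New()
        h.Write([]byte{0x99, byte(len(body) >> 8), byte(len(body))})
        h.Write(body)
        var fp [20]byte
        copy(fp[:], h.Sum(nil))
        return fp, binary.BigEndian.Uint64(fp[12:20])
    }

    func main() {
        fp, id := v4Fingerprint([]byte{0x04, 0, 0, 0, 0, 1}) // placeholder body
        fmt.Printf("fingerprint %X\nkey id %X\n", fp, id)
    }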
-func (pk *PublicKey) parseDSA(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.q.bytes, pk.q.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - dsa := new(dsa.PublicKey) - dsa.P = new(big.Int).SetBytes(pk.p.bytes) - dsa.Q = new(big.Int).SetBytes(pk.q.bytes) - dsa.G = new(big.Int).SetBytes(pk.g.bytes) - dsa.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = dsa - return -} - -// parseElGamal parses ElGamal public key material from the given Reader. See -// RFC 4880, section 5.5.2. -func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - elgamal := new(elgamal.PublicKey) - elgamal.P = new(big.Int).SetBytes(pk.p.bytes) - elgamal.G = new(big.Int).SetBytes(pk.g.bytes) - elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = elgamal - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. -func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - case PubKeyAlgoDSA: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.q.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoElGamal: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoECDSA: - pLength += uint16(pk.ec.byteLen()) - case PubKeyAlgoECDH: - pLength += uint16(pk.ec.byteLen()) - pLength += uint16(pk.ecdh.byteLen()) - default: - panic("unknown public key algorithm") - } - pLength += 6 - h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKey) Serialize(w io.Writer) (err error) { - length := 6 // 6 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - case PubKeyAlgoDSA: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.q.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoElGamal: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoECDSA: - length += pk.ec.byteLen() - case PubKeyAlgoECDH: - length += pk.ec.byteLen() - length += pk.ecdh.byteLen() - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - err = serializeHeader(w, packetType, length) - if err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. 
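The length arithmetic in SerializeSignaturePrefix and Serialize above follows one rule: a fixed six-byte header (version, creation time, algorithm) plus, for each MPI, a two-byte length prefix and the MPI body. A small editorial check of the RSA arm:

    package main

    import "fmt"

    // rsaPublicKeyBodyLen mirrors the RSA case of PublicKey.Serialize above.
    func rsaPublicKeyBodyLen(nBytes, eBytes int) int {
        return 6 + (2 + nBytes) + (2 + eBytes)
    }

    func main() {
        // A 2048-bit modulus (256 bytes) and the common exponent 65537 (3 bytes).
        fmt.Println(rsaPublicKeyBodyLen(256, 3)) // 269
    }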
-func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [6]byte - buf[0] = 4 - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - buf[5] = byte(pk.PubKeyAlgo) - - _, err = w.Write(buf[:]) - if err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - case PubKeyAlgoDSA: - return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) - case PubKeyAlgoElGamal: - return writeMPIs(w, pk.p, pk.g, pk.y) - case PubKeyAlgoECDSA: - return pk.ec.serialize(w) - case PubKeyAlgoECDH: - if err = pk.ec.serialize(w); err != nil { - return - } - return pk.ecdh.serialize(w) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKey) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal -} - -// VerifySignature returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - signed.Write(sig.HashSuffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) - err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) - if err != nil { - return errors.SignatureError("RSA verification failure") - } - return nil - case PubKeyAlgoDSA: - dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - case PubKeyAlgoECDSA: - ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) - if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { - return errors.SignatureError("ECDSA verification failure") - } - return nil - default: - return errors.SignatureError("Unsupported public key algorithm used in signature") - } -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. 
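In practice VerifySignature is rarely called directly; the top-level openpgp package wires it up. A hedged sketch of detached-signature verification follows; the three file names are placeholders, and both the keyring and the signature are assumed to be ASCII-armored:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/crypto/openpgp"
    )

    func mustOpen(name string) *os.File {
        f, err := os.Open(name)
        if err != nil {
            panic(err)
        }
        return f
    }

    func main() {
        keyring, err := openpgp.ReadArmoredKeyRing(mustOpen("pubkey.asc"))
        if err != nil {
            panic(err)
        }
        message := mustOpen("message.txt")
        sig := mustOpen("message.txt.asc")

        signer, err := openpgp.CheckArmoredDetachedSignature(keyring, message, sig)
        if err != nil {
            panic(err) // surfaces the SignatureError values produced above
        }
        fmt.Printf("good signature from %s\n", signer.PrimaryKey.KeyIdString())
    }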
-func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) - if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - case PubKeyAlgoDSA: - dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - default: - panic("shouldn't happen") - } -} - -// keySignatureHash returns a Hash of the message that needs to be signed for -// pk to assert a subkey relationship to signed. -func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - signed.SerializeSignaturePrefix(h) - signed.serializeWithoutHeaders(h) - return -} - -// VerifyKeySignature returns nil iff sig is a valid signature, made by this -// public key, of signed. -func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - if err = pk.VerifySignature(h, sig); err != nil { - return err - } - - if sig.FlagSign { - // Signing subkeys must be cross-signed. See - // https://www.gnupg.org/faq/subkey-cross-certify.html. - if sig.EmbeddedSignature == nil { - return errors.StructuralError("signing subkey is missing cross-signature") - } - // Verify the cross-signature. This is calculated over the same - // data as the main signature, so we cannot just recursively - // call signed.VerifyKeySignature(...) - if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { - return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) - } - if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { - return errors.StructuralError("error while verifying cross-signature: " + err.Error()) - } - } - - return nil -} - -func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - return -} - -// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this -// public key. 
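The DSA truncation step above (also applied when signing) is worth isolating, since it is the one place the digest is altered before use. A standalone restatement of the FIPS 186-3, section 4.6 rule:

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    // truncateDigest keeps only the leftmost digest bytes, up to the byte
    // length of the subgroup order q, as in the DSA branches above.
    func truncateDigest(digest []byte, qBitLen int) []byte {
        subgroupSize := (qBitLen + 7) / 8
        if len(digest) > subgroupSize {
            return digest[:subgroupSize]
        }
        return digest
    }

    func main() {
        d := sha256.Sum256([]byte("example"))
        fmt.Println(len(truncateDigest(d[:], 160))) // 20: SHA-256 cut down for a 160-bit q
    }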
-func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// userIdSignatureHash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - var buf [5]byte - buf[0] = 0xb4 - buf[1] = byte(len(id) >> 24) - buf[2] = byte(len(id) >> 16) - buf[3] = byte(len(id) >> 8) - buf[4] = byte(len(id)) - h.Write(buf[:]) - h.Write([]byte(id)) - - return -} - -// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKey) KeyIdString() string { - return fmt.Sprintf("%X", pk.Fingerprint[12:20]) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKey) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.Fingerprint[16:20]) -} - -// A parsedMPI is used to store the contents of a big integer, along with the -// bit length that was specified in the original input. This allows the MPI to -// be reserialized exactly. -type parsedMPI struct { - bytes []byte - bitLength uint16 -} - -// writeMPIs is a utility function for serializing several big integers to the -// given Writer. -func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { - for _, mpi := range mpis { - err = writeMPI(w, mpi.bitLength, mpi.bytes) - if err != nil { - return - } - } - return -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKey) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - case PubKeyAlgoDSA: - bitLength = pk.p.bitLength - case PubKeyAlgoElGamal: - bitLength = pk.p.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go deleted file mode 100644 index 5daf7b6c..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
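userIdSignatureHash above fixes the framing for certifications: key material first, then 0xb4, a four-byte big-endian length, then the user ID bytes. In the sketch below the key-material prefix is abstracted into a parameter (the real code writes the 0x99-framed key body there), and SHA-1 stands in for the hash, which in reality comes from the signature packet:

    package main

    import (
        "crypto/sha1"
        "fmt"
    )

    // hashUserId mirrors the framing in userIdSignatureHash above.
    func hashUserId(keyPrefix []byte, id string) []byte {
        h := sha1.New()
        h.Write(keyPrefix)
        h.Write([]byte{0xb4,
            byte(len(id) >> 24), byte(len(id) >> 16),
            byte(len(id) >> 8), byte(len(id))})
        h.Write([]byte(id))
        return h.Sum(nil)
    }

    func main() {
        fmt.Printf("%x\n", hashUserId(nil, "Alice <alice@example.com>"))
    }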
- -package packet - -import ( - "crypto" - "crypto/md5" - "crypto/rsa" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" -) - -// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and -// should not be used for signing or encrypting. They are supported here only for -// parsing version 3 key material and validating signatures. -// See RFC 4880, section 5.5.2. -type PublicKeyV3 struct { - CreationTime time.Time - DaysToExpire uint16 - PubKeyAlgo PublicKeyAlgorithm - PublicKey *rsa.PublicKey - Fingerprint [16]byte - KeyId uint64 - IsSubkey bool - - n, e parsedMPI -} - -// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. -// Included here for testing purposes only. RFC 4880, section 5.5.2: -// "an implementation MUST NOT generate a V3 key, but MAY accept it." -func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { - pk := &PublicKeyV3{ - CreationTime: creationTime, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKeyV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [8]byte - if _, err = readFull(r, buf[:]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKeyV3) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := md5.New() - fingerPrint.Write(pk.n.bytes) - fingerPrint.Write(pk.e.bytes) - fingerPrint.Sum(pk.Fingerprint[:0]) - pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { - if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { - return - } - if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { - return - } - - // RFC 4880 Section 12.2 requires the low 8 bytes of the - // modulus to form the key id. - if len(pk.n.bytes) < 8 { - return errors.StructuralError("v3 public key modulus is too short") - } - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. 
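The V3 fingerprint logic above differs from V4 in two ways that the following editorial sketch makes explicit: the MD5 runs over the bare MPI bodies with no framing or length prefixes, and the key ID comes from the low eight bytes of the modulus rather than from the hash, which is why V3 key IDs are trivially forgeable and these keys are parse-only here. The modulus bytes are placeholders:

    package main

    import (
        "crypto/md5"
        "encoding/binary"
        "fmt"
    )

    // v3Fingerprint mirrors PublicKeyV3.setFingerPrintAndKeyId above;
    // n must be at least 8 bytes, as enforced by parseRSA.
    func v3Fingerprint(n, e []byte) ([16]byte, uint64) {
        var fp [16]byte
        h := md5.New()
        h.Write(n)
        h.Write(e)
        h.Sum(fp[:0])
        return fp, binary.BigEndian.Uint64(n[len(n)-8:])
    }

    func main() {
        n := []byte{0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9} // placeholder modulus
        fp, id := v3Fingerprint(n, []byte{0x01, 0x00, 0x01})
        fmt.Printf("%X %X\n", fp, id)
    }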
-func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - default: - panic("unknown public key algorithm") - } - pLength += 6 - w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { - length := 8 // 8 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - if err = serializeHeader(w, packetType, length); err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. -func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [8]byte - // Version 3 - buf[0] = 3 - // Creation time - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - // Days to expire - buf[5] = byte(pk.DaysToExpire >> 8) - buf[6] = byte(pk.DaysToExpire) - // Public key algorithm - buf[7] = byte(pk.PubKeyAlgo) - - if _, err = w.Write(buf[:]); err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKeyV3) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - default: - // V3 public keys only support RSA. - panic("shouldn't happen") - } -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of signed. 
-func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// userIdSignatureV3Hash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { - if !hfn.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hfn.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - h.Write([]byte(id)) - - return -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKeyV3) KeyIdString() string { - return fmt.Sprintf("%X", pk.KeyId) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKeyV3) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/reader.go b/vendor/golang.org/x/crypto/openpgp/packet/reader.go deleted file mode 100644 index 34bc7c61..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/reader.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "golang.org/x/crypto/openpgp/errors" - "io" -) - -// Reader reads packets from an io.Reader and allows packets to be 'unread' so -// that they result from the next call to Next. -type Reader struct { - q []Packet - readers []io.Reader -} - -// New io.Readers are pushed when a compressed or encrypted packet is processed -// and recursively treated as a new source of packets. However, a carefully -// crafted packet can trigger an infinite recursive sequence of packets. See -// http://mumble.net/~campbell/misc/pgp-quine -// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 -// This constant limits the number of recursive packets that may be pushed. -const maxReaders = 32 - -// Next returns the most recently unread Packet, or reads another packet from -// the top-most io.Reader. Unknown packet types are skipped. -func (r *Reader) Next() (p Packet, err error) { - if len(r.q) > 0 { - p = r.q[len(r.q)-1] - r.q = r.q[:len(r.q)-1] - return - } - - for len(r.readers) > 0 { - p, err = Read(r.readers[len(r.readers)-1]) - if err == nil { - return - } - if err == io.EOF { - r.readers = r.readers[:len(r.readers)-1] - continue - } - if _, ok := err.(errors.UnknownPacketTypeError); !ok { - return nil, err - } - } - - return nil, io.EOF -} - -// Push causes the Reader to start reading from a new io.Reader. When an EOF -// error is seen from the new io.Reader, it is popped and the Reader continues -// to read from the next most recent io.Reader. Push returns a StructuralError -// if pushing the reader would exceed the maximum recursion level, otherwise it -// returns nil. 
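The Reader above is how the rest of the library walks nested streams: take the next packet, and when it is a container, Push its decompressed body and keep reading until the pushed reader drains and is popped again. A minimal sketch for compressed packets, with "message.gpg" as a placeholder path:

    package main

    import (
        "fmt"
        "io"
        "os"

        "golang.org/x/crypto/openpgp/packet"
    )

    func main() {
        f, err := os.Open("message.gpg") // placeholder path
        if err != nil {
            panic(err)
        }
        defer f.Close()

        r := packet.NewReader(f)
        for {
            p, err := r.Next()
            if err == io.EOF {
                break
            }
            if err != nil {
                panic(err)
            }
            if c, ok := p.(*packet.Compressed); ok {
                // Descend into the compressed packet; Next pops the
                // pushed reader once it returns io.EOF, and maxReaders
                // bounds the recursion as described above.
                if err := r.Push(c.Body); err != nil {
                    panic(err)
                }
                continue
            }
            fmt.Printf("%T\n", p)
        }
    }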
-func (r *Reader) Push(reader io.Reader) (err error) { - if len(r.readers) >= maxReaders { - return errors.StructuralError("too many layers of packets") - } - r.readers = append(r.readers, reader) - return nil -} - -// Unread causes the given Packet to be returned from the next call to Next. -func (r *Reader) Unread(p Packet) { - r.q = append(r.q, p) -} - -func NewReader(r io.Reader) *Reader { - return &Reader{ - q: nil, - readers: []io.Reader{r}, - } -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/vendor/golang.org/x/crypto/openpgp/packet/signature.go deleted file mode 100644 index b2a24a53..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/signature.go +++ /dev/null @@ -1,731 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "encoding/asn1" - "encoding/binary" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -const ( - // See RFC 4880, section 5.2.3.21 for details. - KeyFlagCertify = 1 << iota - KeyFlagSign - KeyFlagEncryptCommunications - KeyFlagEncryptStorage -) - -// Signature represents a signature. See RFC 4880, section 5.2. -type Signature struct { - SigType SignatureType - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - - // HashSuffix is extra data that is hashed in after the signed data. - HashSuffix []byte - // HashTag contains the first two bytes of the hash for fast rejection - // of bad signed data. - HashTag [2]byte - CreationTime time.Time - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI - ECDSASigR, ECDSASigS parsedMPI - - // rawSubpackets contains the unparsed subpackets, in order. - rawSubpackets []outputSubpacket - - // The following are optional so are nil when not included in the - // signature. - - SigLifetimeSecs, KeyLifetimeSecs *uint32 - PreferredSymmetric, PreferredHash, PreferredCompression []uint8 - IssuerKeyId *uint64 - IsPrimaryId *bool - - // FlagsValid is set if any flags were given. See RFC 4880, section - // 5.2.3.21 for details. - FlagsValid bool - FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool - - // RevocationReason is set if this signature has been revoked. - // See RFC 4880, section 5.2.3.23 for details. - RevocationReason *uint8 - RevocationReasonText string - - // MDC is set if this signature has a feature packet that indicates - // support for MDC subpackets. - MDC bool - - // EmbeddedSignature, if non-nil, is a signature of the parent key, by - // this key. This prevents an attacker from claiming another's signing - // subkey as their own. 
- EmbeddedSignature *Signature - - outSubpackets []outputSubpacket -} - -func (sig *Signature) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.3 - var buf [5]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - if buf[0] != 4 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - - _, err = readFull(r, buf[:5]) - if err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - - var ok bool - sig.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) - l := 6 + hashedSubpacketsLength - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - copy(sig.HashSuffix[1:], buf[:5]) - hashedSubpackets := sig.HashSuffix[6:l] - _, err = readFull(r, hashedSubpackets) - if err != nil { - return - } - // See RFC 4880, section 5.2.4 - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = uint8(l >> 24) - trailer[3] = uint8(l >> 16) - trailer[4] = uint8(l >> 8) - trailer[5] = uint8(l) - - err = parseSignatureSubpackets(sig, hashedSubpackets, true) - if err != nil { - return - } - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) - unhashedSubpackets := make([]byte, unhashedSubpacketsLength) - _, err = readFull(r, unhashedSubpackets) - if err != nil { - return - } - err = parseSignatureSubpackets(sig, unhashedSubpackets, false) - if err != nil { - return - } - - _, err = readFull(r, sig.HashTag[:2]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - } - case PubKeyAlgoECDSA: - sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) - } - default: - panic("unreachable") - } - return -} - -// parseSignatureSubpackets parses subpackets of the main signature packet. See -// RFC 4880, section 5.2.3.1. 
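The trickiest part of Signature.parse above is the trailer appended to HashSuffix. It can be restated in isolation: after the hashed subpacket area, the hash input is closed with 0x04, 0xff, and a four-byte big-endian count of the signature bytes hashed so far, i.e. version byte through the end of the hashed subpackets (RFC 4880, section 5.2.4):

    package main

    import "fmt"

    // v4SigTrailer mirrors the trailer assembled in Signature.parse above.
    func v4SigTrailer(hashedLen uint32) []byte {
        return []byte{4, 0xff,
            byte(hashedLen >> 24), byte(hashedLen >> 16),
            byte(hashedLen >> 8), byte(hashedLen)}
    }

    func main() {
        // 6 fixed bytes plus, say, 16 bytes of hashed subpackets.
        fmt.Printf("% x\n", v4SigTrailer(6+16)) // 04 ff 00 00 00 16
    }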
-func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { - for len(subpackets) > 0 { - subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) - if err != nil { - return - } - } - - if sig.CreationTime.IsZero() { - err = errors.StructuralError("no creation time in signature") - } - - return -} - -type signatureSubpacketType uint8 - -const ( - creationTimeSubpacket signatureSubpacketType = 2 - signatureExpirationSubpacket signatureSubpacketType = 3 - keyExpirationSubpacket signatureSubpacketType = 9 - prefSymmetricAlgosSubpacket signatureSubpacketType = 11 - issuerSubpacket signatureSubpacketType = 16 - prefHashAlgosSubpacket signatureSubpacketType = 21 - prefCompressionSubpacket signatureSubpacketType = 22 - primaryUserIdSubpacket signatureSubpacketType = 25 - keyFlagsSubpacket signatureSubpacketType = 27 - reasonForRevocationSubpacket signatureSubpacketType = 29 - featuresSubpacket signatureSubpacketType = 30 - embeddedSignatureSubpacket signatureSubpacketType = 32 -) - -// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. -func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { - // RFC 4880, section 5.2.3.1 - var ( - length uint32 - packetType signatureSubpacketType - isCritical bool - ) - switch { - case subpacket[0] < 192: - length = uint32(subpacket[0]) - subpacket = subpacket[1:] - case subpacket[0] < 255: - if len(subpacket) < 2 { - goto Truncated - } - length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 - subpacket = subpacket[2:] - default: - if len(subpacket) < 5 { - goto Truncated - } - length = uint32(subpacket[1])<<24 | - uint32(subpacket[2])<<16 | - uint32(subpacket[3])<<8 | - uint32(subpacket[4]) - subpacket = subpacket[5:] - } - if length > uint32(len(subpacket)) { - goto Truncated - } - rest = subpacket[length:] - subpacket = subpacket[:length] - if len(subpacket) == 0 { - err = errors.StructuralError("zero length signature subpacket") - return - } - packetType = signatureSubpacketType(subpacket[0] & 0x7f) - isCritical = subpacket[0]&0x80 == 0x80 - subpacket = subpacket[1:] - sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) - switch packetType { - case creationTimeSubpacket: - if !isHashed { - err = errors.StructuralError("signature creation time in non-hashed area") - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("signature creation time not four bytes") - return - } - t := binary.BigEndian.Uint32(subpacket) - sig.CreationTime = time.Unix(int64(t), 0) - case signatureExpirationSubpacket: - // Signature expiration time, section 5.2.3.10 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("expiration subpacket with bad length") - return - } - sig.SigLifetimeSecs = new(uint32) - *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case keyExpirationSubpacket: - // Key expiration time, section 5.2.3.6 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("key expiration subpacket with bad length") - return - } - sig.KeyLifetimeSecs = new(uint32) - *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case prefSymmetricAlgosSubpacket: - // Preferred symmetric algorithms, section 5.2.3.7 - if !isHashed { - return - } - sig.PreferredSymmetric = make([]byte, len(subpacket)) - copy(sig.PreferredSymmetric, subpacket) - case issuerSubpacket: - // Issuer, section 5.2.3.5 - if 
len(subpacket) != 8 { - err = errors.StructuralError("issuer subpacket with bad length") - return - } - sig.IssuerKeyId = new(uint64) - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) - case prefHashAlgosSubpacket: - // Preferred hash algorithms, section 5.2.3.8 - if !isHashed { - return - } - sig.PreferredHash = make([]byte, len(subpacket)) - copy(sig.PreferredHash, subpacket) - case prefCompressionSubpacket: - // Preferred compression algorithms, section 5.2.3.9 - if !isHashed { - return - } - sig.PreferredCompression = make([]byte, len(subpacket)) - copy(sig.PreferredCompression, subpacket) - case primaryUserIdSubpacket: - // Primary User ID, section 5.2.3.19 - if !isHashed { - return - } - if len(subpacket) != 1 { - err = errors.StructuralError("primary user id subpacket with bad length") - return - } - sig.IsPrimaryId = new(bool) - if subpacket[0] > 0 { - *sig.IsPrimaryId = true - } - case keyFlagsSubpacket: - // Key flags, section 5.2.3.21 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty key flags subpacket") - return - } - sig.FlagsValid = true - if subpacket[0]&KeyFlagCertify != 0 { - sig.FlagCertify = true - } - if subpacket[0]&KeyFlagSign != 0 { - sig.FlagSign = true - } - if subpacket[0]&KeyFlagEncryptCommunications != 0 { - sig.FlagEncryptCommunications = true - } - if subpacket[0]&KeyFlagEncryptStorage != 0 { - sig.FlagEncryptStorage = true - } - case reasonForRevocationSubpacket: - // Reason For Revocation, section 5.2.3.23 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty revocation reason subpacket") - return - } - sig.RevocationReason = new(uint8) - *sig.RevocationReason = subpacket[0] - sig.RevocationReasonText = string(subpacket[1:]) - case featuresSubpacket: - // Features subpacket, section 5.2.3.24 specifies a very general - // mechanism for OpenPGP implementations to signal support for new - // features. In practice, the subpacket is used exclusively to - // indicate support for MDC-protected encryption. - sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 - case embeddedSignatureSubpacket: - // Only usage is in signatures that cross-certify - // signing subkeys. section 5.2.3.26 describes the - // format, with its usage described in section 11.1 - if sig.EmbeddedSignature != nil { - err = errors.StructuralError("Cannot have multiple embedded signatures") - return - } - sig.EmbeddedSignature = new(Signature) - // Embedded signatures are required to be v4 signatures see - // section 12.1. However, we only parse v4 signatures in this - // file anyway. - if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { - return nil, err - } - if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { - return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) - } - default: - if isCritical { - err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) - return - } - } - return - -Truncated: - err = errors.StructuralError("signature subpacket truncated") - return -} - -// subpacketLengthLength returns the length, in bytes, of an encoded length value. -func subpacketLengthLength(length int) int { - if length < 192 { - return 1 - } - if length < 16320 { - return 2 - } - return 5 -} - -// serializeSubpacketLength marshals the given length into to. -func serializeSubpacketLength(to []byte, length int) int { - // RFC 4880, Section 4.2.2. 
- if length < 192 { - to[0] = byte(length) - return 1 - } - if length < 16320 { - length -= 192 - to[0] = byte((length >> 8) + 192) - to[1] = byte(length) - return 2 - } - to[0] = 255 - to[1] = byte(length >> 24) - to[2] = byte(length >> 16) - to[3] = byte(length >> 8) - to[4] = byte(length) - return 5 -} - -// subpacketsLength returns the serialized length, in bytes, of the given -// subpackets. -func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - length += subpacketLengthLength(len(subpacket.contents) + 1) - length += 1 // type byte - length += len(subpacket.contents) - } - } - return -} - -// serializeSubpackets marshals the given subpackets into to. -func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - n := serializeSubpacketLength(to, len(subpacket.contents)+1) - to[n] = byte(subpacket.subpacketType) - to = to[1+n:] - n = copy(to, subpacket.contents) - to = to[n:] - } - } - return -} - -// KeyExpired returns whether sig is a self-signature of a key that has -// expired. -func (sig *Signature) KeyExpired(currentTime time.Time) bool { - if sig.KeyLifetimeSecs == nil { - return false - } - expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) - return currentTime.After(expiry) -} - -// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. -func (sig *Signature) buildHashSuffix() (err error) { - hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) - - var ok bool - l := 6 + hashedSubpacketsLen - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - sig.HashSuffix[1] = uint8(sig.SigType) - sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) - sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) - if !ok { - sig.HashSuffix = nil - return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) - } - sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) - sig.HashSuffix[5] = byte(hashedSubpacketsLen) - serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = byte(l >> 24) - trailer[3] = byte(l >> 16) - trailer[4] = byte(l >> 8) - trailer[5] = byte(l) - return -} - -func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { - err = sig.buildHashSuffix() - if err != nil { - return - } - - h.Write(sig.HashSuffix) - digest = h.Sum(nil) - copy(sig.HashTag[:], digest) - return -} - -// Sign signs a message with a private key. The hash, h, must contain -// the hash of the message to be signed and will be mutated by this function. -// On success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. 
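// For reference, the suffix assembled by buildHashSuffix above is: 0x04,
// the signature type, the public-key algorithm, the hash id, a two-octet
// big-endian hashed-subpacket length, the hashed subpackets themselves,
// and then the v4 trailer 0x04 0xff followed by a four-octet big-endian
// count of the preceding suffix bytes; signPrepareHash feeds all of it
// into h after the message.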
-func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { - sig.outSubpackets = sig.buildSubpackets() - digest, err := sig.signPrepareHash(h) - if err != nil { - return - } - - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - // supports both *rsa.PrivateKey and crypto.Signer - sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes)) - case PubKeyAlgoDSA: - dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) - - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 - if len(digest) > subgroupSize { - digest = digest[:subgroupSize] - } - r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) - if err == nil { - sig.DSASigR.bytes = r.Bytes() - sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes)) - sig.DSASigS.bytes = s.Bytes() - sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes)) - } - case PubKeyAlgoECDSA: - var r, s *big.Int - if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { - // direct support, avoid asn1 wrapping/unwrapping - r, s, err = ecdsa.Sign(config.Random(), pk, digest) - } else { - var b []byte - b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - if err == nil { - r, s, err = unwrapECDSASig(b) - } - } - if err == nil { - sig.ECDSASigR = fromBig(r) - sig.ECDSASigS = fromBig(s) - } - default: - err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) - } - - return -} - -// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA -// signature. -func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { - var ecsdaSig struct { - R, S *big.Int - } - _, err = asn1.Unmarshal(b, &ecsdaSig) - if err != nil { - return - } - return ecsdaSig.R, ecsdaSig.S, nil -} - -// SignUserId computes a signature from priv, asserting that pub is a valid -// key for the identity id. On success, the signature is stored in sig. Call -// Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// SignKey computes a signature from priv, asserting that pub is a subkey. On -// success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. 
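// A minimal sketch of driving Sign and Serialize by hand, assuming w,
// message, config and an already-decrypted priv *PrivateKey are in scope
// and the chosen hash is linked in; the top-level openpgp.DetachSign wraps
// this same sequence:
//
//	sig := &Signature{
//		SigType:      SigTypeBinary,
//		PubKeyAlgo:   priv.PubKeyAlgo,
//		Hash:         config.Hash(),
//		CreationTime: config.Now(),
//		IssuerKeyId:  &priv.KeyId,
//	}
//	h := sig.Hash.New()
//	h.Write(message)
//	if err := sig.Sign(h, priv, config); err != nil {
//		return err
//	}
//	return sig.Serialize(w)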
-func (sig *Signature) Serialize(w io.Writer) (err error) { - if len(sig.outSubpackets) == 0 { - sig.outSubpackets = sig.rawSubpackets - } - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - sigLength := 0 - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sigLength = 2 + len(sig.RSASignature.bytes) - case PubKeyAlgoDSA: - sigLength = 2 + len(sig.DSASigR.bytes) - sigLength += 2 + len(sig.DSASigS.bytes) - case PubKeyAlgoECDSA: - sigLength = 2 + len(sig.ECDSASigR.bytes) - sigLength += 2 + len(sig.ECDSASigS.bytes) - default: - panic("impossible") - } - - unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - length := len(sig.HashSuffix) - 6 /* trailer not included */ + - 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + - 2 /* hash tag */ + sigLength - err = serializeHeader(w, packetTypeSignature, length) - if err != nil { - return - } - - _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) - if err != nil { - return - } - - unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) - unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) - unhashedSubpackets[1] = byte(unhashedSubpacketsLen) - serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) - - _, err = w.Write(unhashedSubpackets) - if err != nil { - return - } - _, err = w.Write(sig.HashTag[:]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - case PubKeyAlgoECDSA: - err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) - default: - panic("impossible") - } - return -} - -// outputSubpacket represents a subpacket to be marshaled. -type outputSubpacket struct { - hashed bool // true if this subpacket is in the hashed area. - subpacketType signatureSubpacketType - isCritical bool - contents []byte -} - -func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { - creationTime := make([]byte, 4) - binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) - subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) - - if sig.IssuerKeyId != nil { - keyId := make([]byte, 8) - binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) - subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) - } - - if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { - sigLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) - } - - // Key flags may only appear in self-signatures or certification signatures. 
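	// (Per RFC 4880, section 5.2.3.21, the octet assembled below uses bit
	// 0x01 for certify, 0x02 for sign, 0x04 for encrypt-communications and
	// 0x08 for encrypt-storage; the KeyFlag* constants carry those values.)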
- - if sig.FlagsValid { - var flags byte - if sig.FlagCertify { - flags |= KeyFlagCertify - } - if sig.FlagSign { - flags |= KeyFlagSign - } - if sig.FlagEncryptCommunications { - flags |= KeyFlagEncryptCommunications - } - if sig.FlagEncryptStorage { - flags |= KeyFlagEncryptStorage - } - subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) - } - - // The following subpackets may only appear in self-signatures - - if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { - keyLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) - } - - if sig.IsPrimaryId != nil && *sig.IsPrimaryId { - subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) - } - - if len(sig.PreferredSymmetric) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) - } - - if len(sig.PreferredHash) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) - } - - if len(sig.PreferredCompression) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) - } - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go deleted file mode 100644 index 6edff889..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "fmt" - "io" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// SignatureV3 represents older version 3 signatures. These signatures are less secure -// than version 4 and should not be used to create new signatures. They are included -// here for backwards compatibility to read and validate with older key material. -// See RFC 4880, section 5.2.2. -type SignatureV3 struct { - SigType SignatureType - CreationTime time.Time - IssuerKeyId uint64 - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - HashTag [2]byte - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI -} - -func (sig *SignatureV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.2 - var buf [8]byte - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] != 5 { - err = errors.UnsupportedError( - "invalid hashed material length " + strconv.Itoa(int(buf[0]))) - return - } - - // Read hashed material: signature type + creation time - if _, err = readFull(r, buf[:5]); err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - t := binary.BigEndian.Uint32(buf[1:5]) - sig.CreationTime = time.Unix(int64(t), 0) - - // Eight-octet Key ID of signer. 
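	// (The rest of the v3 wire format parsed here: an eight-octet issuer
	// key id, one octet each for the public-key and hash algorithms, the
	// two-octet left hash value, then the algorithm-specific MPIs.)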
- if _, err = readFull(r, buf[:8]); err != nil { - return - } - sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:]) - - // Public-key and hash algorithm - if _, err = readFull(r, buf[:2]); err != nil { - return - } - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - var ok bool - if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - // Two-octet field holding left 16 bits of signed hash value. - if _, err = readFull(r, sig.HashTag[:2]); err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil { - return - } - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - default: - panic("unreachable") - } - return -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. -func (sig *SignatureV3) Serialize(w io.Writer) (err error) { - buf := make([]byte, 8) - - // Write the sig type and creation time - buf[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix())) - if _, err = w.Write(buf[:5]); err != nil { - return - } - - // Write the issuer long key ID - binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId) - if _, err = w.Write(buf[:8]); err != nil { - return - } - - // Write public key algorithm, hash ID, and hash value - buf[0] = byte(sig.PubKeyAlgo) - hashId, ok := s2k.HashToHashId(sig.Hash) - if !ok { - return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash)) - } - buf[1] = hashId - copy(buf[2:4], sig.HashTag[:]) - if _, err = w.Write(buf[:4]); err != nil { - return - } - - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - default: - panic("impossible") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go deleted file mode 100644 index 744c2d2c..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto/cipher" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// This is the largest session key that we'll support. Since no 512-bit cipher -// has even been seriously used, this is comfortably large. -const maxSessionKeySizeInBytes = 64 - -// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC -// 4880, section 5.3. 
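// A sketch of how this packet combines with its neighbours, assuming the
// serialization helpers from this file and symmetrically_encrypted.go
// (SerializeLiteral's signature is inferred from its use elsewhere in this
// package); openpgp.SymmetricallyEncrypt is the supported entry point:
//
//	key, err := SerializeSymmetricKeyEncrypted(w, passphrase, config)
//	if err != nil {
//		return err
//	}
//	contents, err := SerializeSymmetricallyEncrypted(w, config.Cipher(), key, config)
//	if err != nil {
//		return err
//	}
//	literal, err := SerializeLiteral(contents, true, "", 0)
//	if err != nil {
//		return err
//	}
//	if _, err := literal.Write(plaintext); err != nil {
//		return err
//	}
//	return literal.Close() // also closes contents, emitting the trailing MDC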
-type SymmetricKeyEncrypted struct { - CipherFunc CipherFunction - s2k func(out, in []byte) - encryptedKey []byte -} - -const symmetricKeyEncryptedVersion = 4 - -func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { - // RFC 4880, section 5.3. - var buf [2]byte - if _, err := readFull(r, buf[:]); err != nil { - return err - } - if buf[0] != symmetricKeyEncryptedVersion { - return errors.UnsupportedError("SymmetricKeyEncrypted version") - } - ske.CipherFunc = CipherFunction(buf[1]) - - if ske.CipherFunc.KeySize() == 0 { - return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) - } - - var err error - ske.s2k, err = s2k.Parse(r) - if err != nil { - return err - } - - encryptedKey := make([]byte, maxSessionKeySizeInBytes) - // The session key may follow. We just have to try and read to find - // out. If it exists then we limit it to maxSessionKeySizeInBytes. - n, err := readFull(r, encryptedKey) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - - if n != 0 { - if n == maxSessionKeySizeInBytes { - return errors.UnsupportedError("oversized encrypted session key") - } - ske.encryptedKey = encryptedKey[:n] - } - - return nil -} - -// Decrypt attempts to decrypt an encrypted session key and returns the key and -// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data -// packet. -func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { - key := make([]byte, ske.CipherFunc.KeySize()) - ske.s2k(key, passphrase) - - if len(ske.encryptedKey) == 0 { - return key, ske.CipherFunc, nil - } - - // the IV is all zeros - iv := make([]byte, ske.CipherFunc.blockSize()) - c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) - plaintextKey := make([]byte, len(ske.encryptedKey)) - c.XORKeyStream(plaintextKey, ske.encryptedKey) - cipherFunc := CipherFunction(plaintextKey[0]) - if cipherFunc.blockSize() == 0 { - return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - plaintextKey = plaintextKey[1:] - if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() { - return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " + - "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")") - } - return plaintextKey, cipherFunc, nil -} - -// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The -// packet contains a random session key, encrypted by a key derived from the -// given passphrase. The session key is returned and must be passed to -// SerializeSymmetricallyEncrypted. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { - cipherFunc := config.Cipher() - keySize := cipherFunc.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - - s2kBuf := new(bytes.Buffer) - keyEncryptingKey := make([]byte, keySize) - // s2k.Serialize salts and stretches the passphrase, and writes the - // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
- err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) - if err != nil { - return - } - s2kBytes := s2kBuf.Bytes() - - packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize - err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) - if err != nil { - return - } - - var buf [2]byte - buf[0] = symmetricKeyEncryptedVersion - buf[1] = byte(cipherFunc) - _, err = w.Write(buf[:]) - if err != nil { - return - } - _, err = w.Write(s2kBytes) - if err != nil { - return - } - - sessionKey := make([]byte, keySize) - _, err = io.ReadFull(config.Random(), sessionKey) - if err != nil { - return - } - iv := make([]byte, cipherFunc.blockSize()) - c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) - encryptedCipherAndKey := make([]byte, keySize+1) - c.XORKeyStream(encryptedCipherAndKey, buf[1:]) - c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) - _, err = w.Write(encryptedCipherAndKey) - if err != nil { - return - } - - key = sessionKey - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go deleted file mode 100644 index 1a1a6296..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto/cipher" - "crypto/sha1" - "crypto/subtle" - "golang.org/x/crypto/openpgp/errors" - "hash" - "io" - "strconv" -) - -// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The -// encrypted contents will consist of more OpenPGP packets. See RFC 4880, -// sections 5.7 and 5.13. -type SymmetricallyEncrypted struct { - MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. - contents io.Reader - prefix []byte -} - -const symmetricallyEncryptedVersion = 1 - -func (se *SymmetricallyEncrypted) parse(r io.Reader) error { - if se.MDC { - // See RFC 4880, section 5.13. - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - if buf[0] != symmetricallyEncryptedVersion { - return errors.UnsupportedError("unknown SymmetricallyEncrypted version") - } - } - se.contents = r - return nil -} - -// Decrypt returns a ReadCloser, from which the decrypted contents of the -// packet can be read. An incorrect key can, with high probability, be detected -// immediately and this will result in a KeyIncorrect error being returned. -func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { - keySize := c.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) - } - if len(key) != keySize { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") - } - - if se.prefix == nil { - se.prefix = make([]byte, c.blockSize()+2) - _, err := readFull(se.contents, se.prefix) - if err != nil { - return nil, err - } - } else if len(se.prefix) != c.blockSize()+2 { - return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") - } - - ocfbResync := OCFBResync - if se.MDC { - // MDC packets use a different form of OCFB mode. 
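		// (RFC 4880, section 13.9: the older SE packet resynchronizes the
		// CFB blocks after the blockSize+2 prefix bytes, while SEIPD/MDC
		// packets skip that resynchronization, which OCFBNoResync selects.)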
- ocfbResync = OCFBNoResync - } - - s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) - if s == nil { - return nil, errors.ErrKeyIncorrect - } - - plaintext := cipher.StreamReader{S: s, R: se.contents} - - if se.MDC { - // MDC packets have an embedded hash that we need to check. - h := sha1.New() - h.Write(se.prefix) - return &seMDCReader{in: plaintext, h: h}, nil - } - - // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. - return seReader{plaintext}, nil -} - -// seReader wraps an io.Reader with a no-op Close method. -type seReader struct { - in io.Reader -} - -func (ser seReader) Read(buf []byte) (int, error) { - return ser.in.Read(buf) -} - -func (ser seReader) Close() error { - return nil -} - -const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size - -// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold -// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an -// MDC packet containing a hash of the previous contents which is checked -// against the running hash. See RFC 4880, section 5.13. -type seMDCReader struct { - in io.Reader - h hash.Hash - trailer [mdcTrailerSize]byte - scratch [mdcTrailerSize]byte - trailerUsed int - error bool - eof bool -} - -func (ser *seMDCReader) Read(buf []byte) (n int, err error) { - if ser.error { - err = io.ErrUnexpectedEOF - return - } - if ser.eof { - err = io.EOF - return - } - - // If we haven't yet filled the trailer buffer then we must do that - // first. - for ser.trailerUsed < mdcTrailerSize { - n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) - ser.trailerUsed += n - if err == io.EOF { - if ser.trailerUsed != mdcTrailerSize { - n = 0 - err = io.ErrUnexpectedEOF - ser.error = true - return - } - ser.eof = true - n = 0 - return - } - - if err != nil { - n = 0 - return - } - } - - // If it's a short read then we read into a temporary buffer and shift - // the data into the caller's buffer. - if len(buf) <= mdcTrailerSize { - n, err = readFull(ser.in, ser.scratch[:len(buf)]) - copy(buf, ser.trailer[:n]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], ser.trailer[n:]) - copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) - if n < len(buf) { - ser.eof = true - err = io.EOF - } - return - } - - n, err = ser.in.Read(buf[mdcTrailerSize:]) - copy(buf, ser.trailer[:]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], buf[n:]) - - if err == io.EOF { - ser.eof = true - } - return -} - -// This is a new-format packet tag byte for a type 19 (MDC) packet. -const mdcPacketTagByte = byte(0x80) | 0x40 | 19 - -func (ser *seMDCReader) Close() error { - if ser.error { - return errors.SignatureError("error during reading") - } - - for !ser.eof { - // We haven't seen EOF so we need to read to the end - var buf [1024]byte - _, err := ser.Read(buf[:]) - if err == io.EOF { - break - } - if err != nil { - return errors.SignatureError("error during reading") - } - } - - if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { - return errors.SignatureError("MDC packet not found") - } - ser.h.Write(ser.trailer[:2]) - - final := ser.h.Sum(nil) - if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { - return errors.SignatureError("hash mismatch") - } - return nil -} - -// An seMDCWriter writes through to an io.WriteCloser while maintains a running -// hash of the data written. On close, it emits an MDC packet containing the -// running hash. 
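// (The trailer written below is mdcPacketTagByte, i.e. 0x80|0x40|19 = 0xd3,
// a length octet equal to sha1.Size = 20, then the SHA-1 digest itself,
// mdcTrailerSize = 22 bytes in all, matching what seMDCReader checks above.)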
-type seMDCWriter struct { - w io.WriteCloser - h hash.Hash -} - -func (w *seMDCWriter) Write(buf []byte) (n int, err error) { - w.h.Write(buf) - return w.w.Write(buf) -} - -func (w *seMDCWriter) Close() (err error) { - var buf [mdcTrailerSize]byte - - buf[0] = mdcPacketTagByte - buf[1] = sha1.Size - w.h.Write(buf[:2]) - digest := w.h.Sum(nil) - copy(buf[2:], digest) - - _, err = w.w.Write(buf[:]) - if err != nil { - return - } - return w.w.Close() -} - -// noOpCloser is like an io.NopCloser, but for an io.Writer. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} - -// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet -// to w and returns a WriteCloser to which the to-be-encrypted packets can be -// written. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { - if c.KeySize() != len(key) { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") - } - writeCloser := noOpCloser{w} - ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) - if err != nil { - return - } - - _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) - if err != nil { - return - } - - block := c.new(key) - blockSize := block.BlockSize() - iv := make([]byte, blockSize) - _, err = config.Random().Read(iv) - if err != nil { - return - } - s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) - _, err = ciphertext.Write(prefix) - if err != nil { - return - } - plaintext := cipher.StreamWriter{S: s, W: ciphertext} - - h := sha1.New() - h.Write(iv) - h.Write(iv[blockSize-2:]) - contents = &seMDCWriter{w: plaintext, h: h} - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go deleted file mode 100644 index ff7ef530..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "image" - "image/jpeg" - "io" -) - -const UserAttrImageSubpacket = 1 - -// UserAttribute is capable of storing other types of data about a user -// beyond name, email and a text comment. In practice, user attributes are typically used -// to store a signed thumbnail photo JPEG image of the user. -// See RFC 4880, section 5.12. -type UserAttribute struct { - Contents []*OpaqueSubpacket -} - -// NewUserAttributePhoto creates a user attribute packet -// containing the given images. -func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { - uat = new(UserAttribute) - for _, photo := range photos { - var buf bytes.Buffer - // RFC 4880, Section 5.12.1. - data := []byte{ - 0x10, 0x00, // Little-endian image header length (16 bytes) - 0x01, // Image header version 1 - 0x01, // JPEG - 0, 0, 0, 0, // 12 reserved octets, must be all zero. 
- 0, 0, 0, 0, - 0, 0, 0, 0} - if _, err = buf.Write(data); err != nil { - return - } - if err = jpeg.Encode(&buf, photo, nil); err != nil { - return - } - uat.Contents = append(uat.Contents, &OpaqueSubpacket{ - SubType: UserAttrImageSubpacket, - Contents: buf.Bytes()}) - } - return -} - -// NewUserAttribute creates a new user attribute packet containing the given subpackets. -func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { - return &UserAttribute{Contents: contents} -} - -func (uat *UserAttribute) parse(r io.Reader) (err error) { - // RFC 4880, section 5.13 - b, err := io.ReadAll(r) - if err != nil { - return - } - uat.Contents, err = OpaqueSubpackets(b) - return -} - -// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including -// header. -func (uat *UserAttribute) Serialize(w io.Writer) (err error) { - var buf bytes.Buffer - for _, sp := range uat.Contents { - sp.Serialize(&buf) - } - if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil { - return err - } - _, err = w.Write(buf.Bytes()) - return -} - -// ImageData returns zero or more byte slices, each containing -// JPEG File Interchange Format (JFIF), for each photo in the -// user attribute packet. -func (uat *UserAttribute) ImageData() (imageData [][]byte) { - for _, sp := range uat.Contents { - if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 { - imageData = append(imageData, sp.Contents[16:]) - } - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/vendor/golang.org/x/crypto/openpgp/packet/userid.go deleted file mode 100644 index 359a462e..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/userid.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "io" - "strings" -) - -// UserId contains text that is intended to represent the name and email -// address of the key holder. See RFC 4880, section 5.11. By convention, this -// takes the form "Full Name (Comment) " -type UserId struct { - Id string // By convention, this takes the form "Full Name (Comment) " which is split out in the fields below. - - Name, Comment, Email string -} - -func hasInvalidCharacters(s string) bool { - for _, c := range s { - switch c { - case '(', ')', '<', '>', 0: - return true - } - } - return false -} - -// NewUserId returns a UserId or nil if any of the arguments contain invalid -// characters. The invalid characters are '\x00', '(', ')', '<' and '>' -func NewUserId(name, comment, email string) *UserId { - // RFC 4880 doesn't deal with the structure of userid strings; the - // name, comment and email form is just a convention. However, there's - // no convention about escaping the metacharacters and GPG just refuses - // to create user ids where, say, the name contains a '('. We mirror - // this behaviour. 
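	// For example, NewUserId("Alice", "work", "alice@example.com") yields
	// the id string "Alice (work) <alice@example.com>", which parseUserId
	// below splits back into its three parts.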
- - if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) { - return nil - } - - uid := new(UserId) - uid.Name, uid.Comment, uid.Email = name, comment, email - uid.Id = name - if len(comment) > 0 { - if len(uid.Id) > 0 { - uid.Id += " " - } - uid.Id += "(" - uid.Id += comment - uid.Id += ")" - } - if len(email) > 0 { - if len(uid.Id) > 0 { - uid.Id += " " - } - uid.Id += "<" - uid.Id += email - uid.Id += ">" - } - return uid -} - -func (uid *UserId) parse(r io.Reader) (err error) { - // RFC 4880, section 5.11 - b, err := io.ReadAll(r) - if err != nil { - return - } - uid.Id = string(b) - uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id) - return -} - -// Serialize marshals uid to w in the form of an OpenPGP packet, including -// header. -func (uid *UserId) Serialize(w io.Writer) error { - err := serializeHeader(w, packetTypeUserId, len(uid.Id)) - if err != nil { - return err - } - _, err = w.Write([]byte(uid.Id)) - return err -} - -// parseUserId extracts the name, comment and email from a user id string that -// is formatted as "Full Name (Comment) ". -func parseUserId(id string) (name, comment, email string) { - var n, c, e struct { - start, end int - } - var state int - - for offset, rune := range id { - switch state { - case 0: - // Entering name - n.start = offset - state = 1 - fallthrough - case 1: - // In name - if rune == '(' { - state = 2 - n.end = offset - } else if rune == '<' { - state = 5 - n.end = offset - } - case 2: - // Entering comment - c.start = offset - state = 3 - fallthrough - case 3: - // In comment - if rune == ')' { - state = 4 - c.end = offset - } - case 4: - // Between comment and email - if rune == '<' { - state = 5 - } - case 5: - // Entering email - e.start = offset - state = 6 - fallthrough - case 6: - // In email - if rune == '>' { - state = 7 - e.end = offset - } - default: - // After email - } - } - switch state { - case 1: - // ended in the name - n.end = len(id) - case 3: - // ended in comment - c.end = len(id) - case 6: - // ended in email - e.end = len(id) - } - - name = strings.TrimSpace(id[n.start:n.end]) - comment = strings.TrimSpace(id[c.start:c.end]) - email = strings.TrimSpace(id[e.start:e.end]) - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go deleted file mode 100644 index 48a89314..00000000 --- a/vendor/golang.org/x/crypto/openpgp/read.go +++ /dev/null @@ -1,448 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package openpgp implements high level operations on OpenPGP messages. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package openpgp // import "golang.org/x/crypto/openpgp" - -import ( - "crypto" - _ "crypto/sha256" - "hash" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" -) - -// SignatureType is the armor type for a PGP signature. -var SignatureType = "PGP SIGNATURE" - -// readArmored reads an armored block with the given type. 
-func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { - block, err := armor.Decode(r) - if err != nil { - return - } - - if block.Type != expectedType { - return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) - } - - return block.Body, nil -} - -// MessageDetails contains the result of parsing an OpenPGP encrypted and/or -// signed message. -type MessageDetails struct { - IsEncrypted bool // true if the message was encrypted. - EncryptedToKeyIds []uint64 // the list of recipient key ids. - IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. - DecryptedWith Key // the private key used to decrypt the message, if any. - IsSigned bool // true if the message is signed. - SignedByKeyId uint64 // the key id of the signer, if any. - SignedBy *Key // the key of the signer, if available. - LiteralData *packet.LiteralData // the metadata of the contents - UnverifiedBody io.Reader // the contents of the message. - - // If IsSigned is true and SignedBy is non-zero then the signature will - // be verified as UnverifiedBody is read. The signature cannot be - // checked until the whole of UnverifiedBody is read so UnverifiedBody - // must be consumed until EOF before the data can be trusted. Even if a - // message isn't signed (or the signer is unknown) the data may contain - // an authentication code that is only checked once UnverifiedBody has - // been consumed. Once EOF has been seen, the following fields are - // valid. (An authentication code failure is reported as a - // SignatureError error when reading from UnverifiedBody.) - SignatureError error // nil if the signature is good. - Signature *packet.Signature // the signature packet itself, if v4 (default) - SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature - - decrypted io.ReadCloser -} - -// A PromptFunction is used as a callback by functions that may need to decrypt -// a private key, or prompt for a passphrase. It is called with a list of -// acceptable, encrypted private keys and a boolean that indicates whether a -// passphrase is usable. It should either decrypt a private key or return a -// passphrase to try. If the decrypted private key or given passphrase isn't -// correct, the function will be called again, forever. Any error returned will -// be passed up. -type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) - -// A keyEnvelopePair is used to store a private key with the envelope that -// contains a symmetric key, encrypted with that key. -type keyEnvelopePair struct { - key Key - encryptedKey *packet.EncryptedKey -} - -// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. -// The given KeyRing should contain both public keys (for signature -// verification) and, possibly encrypted, private keys for decrypting. -// If config is nil, sensible defaults will be used. -func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { - var p packet.Packet - - var symKeys []*packet.SymmetricKeyEncrypted - var pubKeys []keyEnvelopePair - var se *packet.SymmetricallyEncrypted - - packets := packet.NewReader(r) - md = new(MessageDetails) - md.IsEncrypted = true - - // The message, if encrypted, starts with a number of packets - // containing an encrypted decryption key. The decryption key is either - // encrypted to a public key, or with a passphrase. This loop - // collects these packets. 
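	// (This matches RFC 4880's message grammar: zero or more session-key
	// packets, public-key or symmetric-key encrypted, followed by one
	// encrypted data packet; seeing literal or signature packets first
	// means the message was never encrypted.)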
-ParsePackets: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.SymmetricKeyEncrypted: - // This packet contains the decryption key encrypted with a passphrase. - md.IsSymmetricallyEncrypted = true - symKeys = append(symKeys, p) - case *packet.EncryptedKey: - // This packet contains the decryption key encrypted to a public key. - md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) - switch p.Algo { - case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal: - break - default: - continue - } - var keys []Key - if p.KeyId == 0 { - keys = keyring.DecryptionKeys() - } else { - keys = keyring.KeysById(p.KeyId) - } - for _, k := range keys { - pubKeys = append(pubKeys, keyEnvelopePair{k, p}) - } - case *packet.SymmetricallyEncrypted: - se = p - break ParsePackets - case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: - // This message isn't encrypted. - if len(symKeys) != 0 || len(pubKeys) != 0 { - return nil, errors.StructuralError("key material not followed by encrypted message") - } - packets.Unread(p) - return readSignedMessage(packets, nil, keyring) - } - } - - var candidates []Key - var decrypted io.ReadCloser - - // Now that we have the list of encrypted keys we need to decrypt at - // least one of them or, if we cannot, we need to call the prompt - // function so that it can decrypt a key or give us a passphrase. -FindKey: - for { - // See if any of the keys already have a private key available - candidates = candidates[:0] - candidateFingerprints := make(map[string]bool) - - for _, pk := range pubKeys { - if pk.key.PrivateKey == nil { - continue - } - if !pk.key.PrivateKey.Encrypted { - if len(pk.encryptedKey.Key) == 0 { - pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) - } - if len(pk.encryptedKey.Key) == 0 { - continue - } - decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - md.DecryptedWith = pk.key - break FindKey - } - } else { - fpr := string(pk.key.PublicKey.Fingerprint[:]) - if v := candidateFingerprints[fpr]; v { - continue - } - candidates = append(candidates, pk.key) - candidateFingerprints[fpr] = true - } - } - - if len(candidates) == 0 && len(symKeys) == 0 { - return nil, errors.ErrKeyIncorrect - } - - if prompt == nil { - return nil, errors.ErrKeyIncorrect - } - - passphrase, err := prompt(candidates, len(symKeys) != 0) - if err != nil { - return nil, err - } - - // Try the symmetric passphrase first - if len(symKeys) != 0 && passphrase != nil { - for _, s := range symKeys { - key, cipherFunc, err := s.Decrypt(passphrase) - if err == nil { - decrypted, err = se.Decrypt(cipherFunc, key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - break FindKey - } - } - - } - } - } - - md.decrypted = decrypted - if err := packets.Push(decrypted); err != nil { - return nil, err - } - return readSignedMessage(packets, md, keyring) -} - -// readSignedMessage reads a possibly signed message if mdin is non-zero then -// that structure is updated and returned. Otherwise a fresh MessageDetails is -// used. 
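// A sketch of the consumption pattern ReadMessage above requires, assuming
// the caller holds a KeyRing and a PromptFunction; UnverifiedBody must be
// drained to EOF before the signature and MDC results mean anything:
//
//	md, err := ReadMessage(r, keyring, prompt, nil)
//	if err != nil {
//		return nil, err
//	}
//	body, err := io.ReadAll(md.UnverifiedBody) // EOF triggers the MDC check
//	if err != nil {
//		return nil, err
//	}
//	if md.IsSigned && md.SignatureError != nil {
//		return nil, md.SignatureError
//	}
//	return body, nil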
-func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { - if mdin == nil { - mdin = new(MessageDetails) - } - md = mdin - - var p packet.Packet - var h hash.Hash - var wrappedHash hash.Hash -FindLiteralData: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.Compressed: - if err := packets.Push(p.Body); err != nil { - return nil, err - } - case *packet.OnePassSignature: - if !p.IsLast { - return nil, errors.UnsupportedError("nested signatures") - } - - h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) - if err != nil { - md = nil - return - } - - md.IsSigned = true - md.SignedByKeyId = p.KeyId - keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) - if len(keys) > 0 { - md.SignedBy = &keys[0] - } - case *packet.LiteralData: - md.LiteralData = p - break FindLiteralData - } - } - - if md.SignedBy != nil { - md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} - } else if md.decrypted != nil { - md.UnverifiedBody = checkReader{md} - } else { - md.UnverifiedBody = md.LiteralData.Body - } - - return md, nil -} - -// hashForSignature returns a pair of hashes that can be used to verify a -// signature. The signature may specify that the contents of the signed message -// should be preprocessed (i.e. to normalize line endings). Thus this function -// returns two hashes. The second should be used to hash the message itself and -// performs any needed preprocessing. -func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { - if !hashId.Available() { - return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) - } - h := hashId.New() - - switch sigType { - case packet.SigTypeBinary: - return h, h, nil - case packet.SigTypeText: - return h, NewCanonicalTextHash(h), nil - } - - return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) -} - -// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF -// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger -// MDC checks. -type checkReader struct { - md *MessageDetails -} - -func (cr checkReader) Read(buf []byte) (n int, err error) { - n, err = cr.md.LiteralData.Body.Read(buf) - if err == io.EOF { - mdcErr := cr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - return -} - -// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes -// the data as it is read. When it sees an EOF from the underlying io.Reader -// it parses and checks a trailing Signature packet and triggers any MDC checks. 
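// A sketch of detached verification with the helpers later in this file,
// assuming the keyring contains the signer's public key:
//
//	signer, err := CheckArmoredDetachedSignature(keyring, signedFile, armoredSig)
//	if err != nil {
//		return err // errors.ErrUnknownIssuer if no matching key was found
//	}
//	fmt.Printf("signed by key %X\n", signer.PrimaryKey.KeyId)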
-type signatureCheckReader struct { - packets *packet.Reader - h, wrappedHash hash.Hash - md *MessageDetails -} - -func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { - n, err = scr.md.LiteralData.Body.Read(buf) - scr.wrappedHash.Write(buf[:n]) - if err == io.EOF { - var p packet.Packet - p, scr.md.SignatureError = scr.packets.Next() - if scr.md.SignatureError != nil { - return - } - - var ok bool - if scr.md.Signature, ok = p.(*packet.Signature); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) - } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) - } else { - scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") - return - } - - // The SymmetricallyEncrypted packet, if any, might have an - // unsigned hash of its own. In order to check this we need to - // close that Reader. - if scr.md.decrypted != nil { - mdcErr := scr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - } - return -} - -// CheckDetachedSignature takes a signed file and a detached signature and -// returns the signer if the signature is valid. If the signer isn't known, -// ErrUnknownIssuer is returned. -func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - var issuerKeyId uint64 - var hashFunc crypto.Hash - var sigType packet.SignatureType - var keys []Key - var p packet.Packet - - packets := packet.NewReader(signature) - for { - p, err = packets.Next() - if err == io.EOF { - return nil, errors.ErrUnknownIssuer - } - if err != nil { - return nil, err - } - - switch sig := p.(type) { - case *packet.Signature: - if sig.IssuerKeyId == nil { - return nil, errors.StructuralError("signature doesn't have an issuer") - } - issuerKeyId = *sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - case *packet.SignatureV3: - issuerKeyId = sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - default: - return nil, errors.StructuralError("non signature packet found") - } - - keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) - if len(keys) > 0 { - break - } - } - - if len(keys) == 0 { - panic("unreachable") - } - - h, wrappedHash, err := hashForSignature(hashFunc, sigType) - if err != nil { - return nil, err - } - - if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { - return nil, err - } - - for _, key := range keys { - switch sig := p.(type) { - case *packet.Signature: - err = key.PublicKey.VerifySignature(h, sig) - case *packet.SignatureV3: - err = key.PublicKey.VerifySignatureV3(h, sig) - default: - panic("unreachable") - } - - if err == nil { - return key.Entity, nil - } - } - - return nil, err -} - -// CheckArmoredDetachedSignature performs the same actions as -// CheckDetachedSignature but expects the signature to be armored. -func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - body, err := readArmored(signature, SignatureType) - if err != nil { - return - } - - return CheckDetachedSignature(keyring, signed, body) -} diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go deleted file mode 100644 index f53244a1..00000000 --- a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package s2k implements the various OpenPGP string-to-key transforms as -// specified in RFC 4880 section 3.7.1. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package s2k // import "golang.org/x/crypto/openpgp/s2k" - -import ( - "crypto" - "hash" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/errors" -) - -// Config collects configuration parameters for s2k key-stretching -// transformations. A nil *Config is valid and results in all default -// values. Currently, Config is used only by the Serialize function in -// this package. -type Config struct { - // Hash is the default hash function to be used. If - // nil, SHA1 is used. - Hash crypto.Hash - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 65536 is used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. - S2KCount int -} - -func (c *Config) hash() crypto.Hash { - if c == nil || uint(c.Hash) == 0 { - // SHA1 is the historical default in this package. - return crypto.SHA1 - } - - return c.Hash -} - -func (c *Config) encodedCount() uint8 { - if c == nil || c.S2KCount == 0 { - return 96 // The common case. Corresponding to 65536 - } - - i := c.S2KCount - switch { - // Behave like GPG. Should we make 65536 the lowest value used? - case i < 1024: - i = 1024 - case i > 65011712: - i = 65011712 - } - - return encodeCount(i) -} - -// encodeCount converts an iterative "count" in the range 1024 to -// 65011712, inclusive, to an encoded count. The return value is the -// octet that is actually stored in the GPG file. encodeCount panics -// if i is not in the above range (encodedCount above takes care to -// pass i in the correct range). See RFC 4880 Section 3.7.1.3. -func encodeCount(i int) uint8 { - if i < 1024 || i > 65011712 { - panic("count arg i outside the required range") - } - - for encoded := 0; encoded < 256; encoded++ { - count := decodeCount(uint8(encoded)) - if count >= i { - return uint8(encoded) - } - } - - return 255 -} - -// decodeCount returns the s2k mode 3 iterative "count" corresponding to -// the encoded octet c. -func decodeCount(c uint8) int { - return (16 + int(c&15)) << (uint32(c>>4) + 6) -} - -// Simple writes to out the result of computing the Simple S2K function (RFC -// 4880, section 3.7.1.1) using the given hash and input passphrase. -func Simple(out []byte, h hash.Hash, in []byte) { - Salted(out, h, in, nil) -} - -var zero [1]byte - -// Salted writes to out the result of computing the Salted S2K function (RFC -// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
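// (A worked example for decodeCount above: the default encoded octet 96 is
// 0x60, giving (16+0) << (6+6) = 65536 iterated octets, matching the
// default in encodedCount; the maximum octet 255 = 0xff gives
// (16+15) << (15+6) = 65011712, the upper bound enforced by encodeCount.)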
-func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { - done := 0 - var digest []byte - - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - h.Write(salt) - h.Write(in) - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Iterated writes to out the result of computing the Iterated and Salted S2K -// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, -// salt and iteration count. -func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { - combined := make([]byte, len(in)+len(salt)) - copy(combined, salt) - copy(combined[len(salt):], in) - - if count < len(combined) { - count = len(combined) - } - - done := 0 - var digest []byte - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - written := 0 - for written < count { - if written+len(combined) > count { - todo := count - written - h.Write(combined[:todo]) - written = count - } else { - h.Write(combined) - written += len(combined) - } - } - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Parse reads a binary specification for a string-to-key transformation from r -// and returns a function which performs that transform. -func Parse(r io.Reader) (f func(out, in []byte), err error) { - var buf [9]byte - - _, err = io.ReadFull(r, buf[:2]) - if err != nil { - return - } - - hash, ok := HashIdToHash(buf[1]) - if !ok { - return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) - } - if !hash.Available() { - return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) - } - h := hash.New() - - switch buf[0] { - case 0: - f := func(out, in []byte) { - Simple(out, h, in) - } - return f, nil - case 1: - _, err = io.ReadFull(r, buf[:8]) - if err != nil { - return - } - f := func(out, in []byte) { - Salted(out, h, in, buf[:8]) - } - return f, nil - case 3: - _, err = io.ReadFull(r, buf[:9]) - if err != nil { - return - } - count := decodeCount(buf[8]) - f := func(out, in []byte) { - Iterated(out, h, in, buf[:8], count) - } - return f, nil - } - - return nil, errors.UnsupportedError("S2K function") -} - -// Serialize salts and stretches the given passphrase and writes the -// resulting key into key. It also serializes an S2K descriptor to -// w. The key stretching can be configured with c, which may be -// nil. In that case, sensible defaults will be used. -func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { - var buf [11]byte - buf[0] = 3 /* iterated and salted */ - buf[1], _ = HashToHashId(c.hash()) - salt := buf[2:10] - if _, err := io.ReadFull(rand, salt); err != nil { - return err - } - encodedCount := c.encodedCount() - count := decodeCount(encodedCount) - buf[10] = encodedCount - if _, err := w.Write(buf[:]); err != nil { - return err - } - - Iterated(key, c.hash().New(), passphrase, salt, count) - return nil -} - -// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with -// Go's crypto.Hash type. See RFC 4880, section 9.4. 
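// A sketch of a Serialize/Parse round trip, assuming passphrase is in
// scope, crypto/rand supplies the salt, and the chosen hash is linked in
// (e.g. _ "crypto/sha256"); both halves derive the same key:
//
//	var desc bytes.Buffer
//	key := make([]byte, 32)
//	if err := Serialize(&desc, key, rand.Reader, passphrase, &Config{Hash: crypto.SHA256}); err != nil {
//		return err
//	}
//	f, err := Parse(&desc)
//	if err != nil {
//		return err
//	}
//	key2 := make([]byte, 32)
//	f(key2, passphrase) // key2 now matches key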
-var hashToHashIdMapping = []struct { - id byte - hash crypto.Hash - name string -}{ - {1, crypto.MD5, "MD5"}, - {2, crypto.SHA1, "SHA1"}, - {3, crypto.RIPEMD160, "RIPEMD160"}, - {8, crypto.SHA256, "SHA256"}, - {9, crypto.SHA384, "SHA384"}, - {10, crypto.SHA512, "SHA512"}, - {11, crypto.SHA224, "SHA224"}, -} - -// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP -// hash id. -func HashIdToHash(id byte) (h crypto.Hash, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.hash, true - } - } - return 0, false -} - -// HashIdToString returns the name of the hash function corresponding to the -// given OpenPGP hash id. -func HashIdToString(id byte) (name string, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.name, true - } - } - - return "", false -} - -// HashToHashId returns an OpenPGP hash id which corresponds the given Hash. -func HashToHashId(h crypto.Hash) (id byte, ok bool) { - for _, m := range hashToHashIdMapping { - if m.hash == h { - return m.id, true - } - } - return 0, false -} diff --git a/vendor/golang.org/x/crypto/openpgp/write.go b/vendor/golang.org/x/crypto/openpgp/write.go deleted file mode 100644 index b89d48b8..00000000 --- a/vendor/golang.org/x/crypto/openpgp/write.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto" - "hash" - "io" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" - "golang.org/x/crypto/openpgp/s2k" -) - -// DetachSign signs message with the private key from signer (which must -// already have been decrypted) and writes the signature to w. -// If config is nil, sensible defaults will be used. -func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// ArmoredDetachSign signs message with the private key from signer (which -// must already have been decrypted) and writes an armored signature to w. -// If config is nil, sensible defaults will be used. -func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { - return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// DetachSignText signs message (after canonicalising the line endings) with -// the private key from signer (which must already have been decrypted) and -// writes the signature to w. -// If config is nil, sensible defaults will be used. -func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeText, config) -} - -// ArmoredDetachSignText signs message (after canonicalising the line endings) -// with the private key from signer (which must already have been decrypted) -// and writes an armored signature to w. -// If config is nil, sensible defaults will be used. 
-func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return armoredDetachSign(w, signer, message, packet.SigTypeText, config) -} - -func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - out, err := armor.Encode(w, SignatureType, nil) - if err != nil { - return - } - err = detachSign(out, signer, message, sigType, config) - if err != nil { - return - } - return out.Close() -} - -func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing key doesn't have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing key is encrypted") - } - - sig := new(packet.Signature) - sig.SigType = sigType - sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo - sig.Hash = config.Hash() - sig.CreationTime = config.Now() - sig.IssuerKeyId = &signer.PrivateKey.KeyId - - h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) - if err != nil { - return - } - io.Copy(wrappedHash, message) - - err = sig.Sign(h, signer.PrivateKey, config) - if err != nil { - return - } - - return sig.Serialize(w) -} - -// FileHints contains metadata about encrypted files. This metadata is, itself, -// encrypted. -type FileHints struct { - // IsBinary can be set to hint that the contents are binary data. - IsBinary bool - // FileName hints at the name of the file that should be written. It's - // truncated to 255 bytes if longer. It may be empty to suggest that the - // file should not be written to disk. It may be equal to "_CONSOLE" to - // suggest the data should not be written to disk. - FileName string - // ModTime contains the modification time of the file, or the zero time if not applicable. - ModTime time.Time -} - -// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. -// The resulting WriteCloser must be closed after the contents of the file have -// been written. -// If config is nil, sensible defaults will be used. -func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if hints == nil { - hints = &FileHints{} - } - - key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) - if err != nil { - return - } - w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) - if err != nil { - return - } - - literaldata := w - if algo := config.Compression(); algo != packet.CompressionNone { - var compConfig *packet.CompressionConfig - if config != nil { - compConfig = config.CompressionConfig - } - literaldata, err = packet.SerializeCompressed(w, algo, compConfig) - if err != nil { - return - } - } - - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) -} - -// intersectPreferences mutates and returns a prefix of a that contains only -// the values in the intersection of a and b. The order of a is preserved. 
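A worked example of the intersection described above, using the numeric ids from hashToHashIdMapping: because the result is a prefix of a, a's preference order wins and b's order is irrelevant.

    a := []uint8{8, 9, 10, 2, 3} // SHA256, SHA384, SHA512, SHA1, RIPEMD160
    b := []uint8{2, 10}
    fmt.Println(intersectPreferences(a, b)) // [10 2]: a's order, b's members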
-func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { - var j int - for _, v := range a { - for _, v2 := range b { - if v == v2 { - a[j] = v - j++ - break - } - } - } - - return a[:j] -} - -func hashToHashId(h crypto.Hash) uint8 { - v, ok := s2k.HashToHashId(h) - if !ok { - panic("tried to convert unknown hash") - } - return v -} - -// writeAndSign writes the data as a payload package and, optionally, signs -// it. hints contains optional information, that is also encrypted, -// that aids the recipients in processing the message. The resulting -// WriteCloser must be closed after the contents of the file have been -// written. If config is nil, sensible defaults will be used. -func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - var signer *packet.PrivateKey - if signed != nil { - signKey, ok := signed.signingKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("no valid signing keys") - } - signer = signKey.PrivateKey - if signer == nil { - return nil, errors.InvalidArgumentError("no private key in signing key") - } - if signer.Encrypted { - return nil, errors.InvalidArgumentError("signing key must be decrypted") - } - } - - var hash crypto.Hash - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { - hash = h - break - } - } - - // If the hash specified by config is a candidate, we'll use that. - if configuredHash := config.Hash(); configuredHash.Available() { - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { - hash = h - break - } - } - } - - if hash == 0 { - hashId := candidateHashes[0] - name, ok := s2k.HashIdToString(hashId) - if !ok { - name = "#" + strconv.Itoa(int(hashId)) - } - return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") - } - - if signer != nil { - ops := &packet.OnePassSignature{ - SigType: packet.SigTypeBinary, - Hash: hash, - PubKeyAlgo: signer.PubKeyAlgo, - KeyId: signer.KeyId, - IsLast: true, - } - if err := ops.Serialize(payload); err != nil { - return nil, err - } - } - - if hints == nil { - hints = &FileHints{} - } - - w := payload - if signer != nil { - // If we need to write a signature packet after the literal - // data then we need to stop literalData from closing - // encryptedData. - w = noOpCloser{w} - - } - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) - if err != nil { - return nil, err - } - - if signer != nil { - return signatureWriter{payload, literalData, hash, hash.New(), signer, config}, nil - } - return literalData, nil -} - -// Encrypt encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. 
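A hedged usage sketch for Encrypt (not in the original file): recipient and signer are assumed *openpgp.Entity values, e.g. from openpgp.ReadArmoredKeyRing, and signer may be nil for an unsigned message.

    var buf bytes.Buffer
    pt, err := openpgp.Encrypt(&buf, []*openpgp.Entity{recipient}, signer, nil, nil)
    if err != nil {
        log.Fatal(err)
    }
    if _, err := io.WriteString(pt, "attack at dawn"); err != nil {
        log.Fatal(err)
    }
    // The ciphertext in buf is incomplete until Close flushes the literal
    // data packet and, when signing, the trailing signature packet.
    if err := pt.Close(); err != nil {
        log.Fatal(err)
    }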
-func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if len(to) == 0 { - return nil, errors.InvalidArgumentError("no encryption recipient provided") - } - - // These are the possible ciphers that we'll use for the message. - candidateCiphers := []uint8{ - uint8(packet.CipherAES128), - uint8(packet.CipherAES256), - uint8(packet.CipherCAST5), - } - // These are the possible hash functions that we'll use for the signature. - candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - // In the event that a recipient doesn't specify any supported ciphers - // or hash functions, these are the ones that we assume that every - // implementation supports. - defaultCiphers := candidateCiphers[len(candidateCiphers)-1:] - defaultHashes := candidateHashes[len(candidateHashes)-1:] - - encryptKeys := make([]Key, len(to)) - for i := range to { - var ok bool - encryptKeys[i], ok = to[i].encryptionKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") - } - - sig := to[i].primaryIdentity().SelfSignature - - preferredSymmetric := sig.PreferredSymmetric - if len(preferredSymmetric) == 0 { - preferredSymmetric = defaultCiphers - } - preferredHashes := sig.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - } - - if len(candidateCiphers) == 0 || len(candidateHashes) == 0 { - return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") - } - - cipher := packet.CipherFunction(candidateCiphers[0]) - // If the cipher specified by config is a candidate, we'll use that. - configuredCipher := config.Cipher() - for _, c := range candidateCiphers { - cipherFunc := packet.CipherFunction(c) - if cipherFunc == configuredCipher { - cipher = cipherFunc - break - } - } - - symKey := make([]byte, cipher.KeySize()) - if _, err := io.ReadFull(config.Random(), symKey); err != nil { - return nil, err - } - - for _, key := range encryptKeys { - if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil { - return nil, err - } - } - - payload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config) - if err != nil { - return - } - - return writeAndSign(payload, candidateHashes, signed, hints, config) -} - -// Sign signs a message. The resulting WriteCloser must be closed after the -// contents of the file have been written. hints contains optional information -// that aids the recipients in processing the message. -// If config is nil, sensible defaults will be used. -func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { - if signed == nil { - return nil, errors.InvalidArgumentError("no signer provided") - } - - // These are the possible hash functions that we'll use for the signature. 
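// The list below is ordered by this package's own preference; only its
// final element seeds defaultHashes a few lines down, so the legacy hash
// sits last both as the least-preferred candidate and as the fallback
// assumed to be implemented everywhere.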
- candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - defaultHashes := candidateHashes[len(candidateHashes)-1:] - preferredHashes := signed.primaryIdentity().SelfSignature.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, config) -} - -// signatureWriter hashes the contents of a message while passing it along to -// literalData. When closed, it closes literalData, writes a signature packet -// to encryptedData and then also closes encryptedData. -type signatureWriter struct { - encryptedData io.WriteCloser - literalData io.WriteCloser - hashType crypto.Hash - h hash.Hash - signer *packet.PrivateKey - config *packet.Config -} - -func (s signatureWriter) Write(data []byte) (int, error) { - s.h.Write(data) - return s.literalData.Write(data) -} - -func (s signatureWriter) Close() error { - sig := &packet.Signature{ - SigType: packet.SigTypeBinary, - PubKeyAlgo: s.signer.PubKeyAlgo, - Hash: s.hashType, - CreationTime: s.config.Now(), - IssuerKeyId: &s.signer.KeyId, - } - - if err := sig.Sign(s.h, s.signer, s.config); err != nil { - return err - } - if err := s.literalData.Close(); err != nil { - return err - } - if err := sig.Serialize(s.encryptedData); err != nil { - return err - } - return s.encryptedData.Close() -} - -// noOpCloser is like an io.NopCloser, but for an io.Writer. -// TODO: we have two of these in OpenPGP packages alone. This probably needs -// to be promoted somewhere more common. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go deleted file mode 100644 index 1ab07d07..00000000 --- a/vendor/golang.org/x/crypto/ssh/buffer.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "io" - "sync" -) - -// buffer provides a linked list buffer for data exchange -// between producer and consumer. Theoretically the buffer is -// of unlimited capacity as it does no allocation of its own. -type buffer struct { - // protects concurrent access to head, tail and closed - *sync.Cond - - head *element // the buffer that will be read first - tail *element // the buffer that will be read last - - closed bool -} - -// An element represents a single link in a linked list. -type element struct { - buf []byte - next *element -} - -// newBuffer returns an empty buffer that is not closed. -func newBuffer() *buffer { - e := new(element) - b := &buffer{ - Cond: newCond(), - head: e, - tail: e, - } - return b -} - -// write makes buf available for Read to receive. -// buf must not be modified after the call to write. -func (b *buffer) write(buf []byte) { - b.Cond.L.Lock() - e := &element{buf: buf} - b.tail.next = e - b.tail = e - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// eof closes the buffer. Reads from the buffer once all -// the data has been consumed will receive io.EOF. 
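A small sketch of the producer/consumer contract (package-internal, since buffer is unexported): Read blocks until data arrives and reports io.EOF only after eof() has been called and all queued data has been drained.

    b := newBuffer()
    go func() {
        b.write([]byte("hello "))
        b.write([]byte("world"))
        b.eof()
    }()
    data, err := io.ReadAll(b) // data == "hello world", err == nil
    _, _ = data, err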
-func (b *buffer) eof() { - b.Cond.L.Lock() - b.closed = true - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// Read reads data from the internal buffer in buf. Reads will block -// if no data is available, or until the buffer is closed. -func (b *buffer) Read(buf []byte) (n int, err error) { - b.Cond.L.Lock() - defer b.Cond.L.Unlock() - - for len(buf) > 0 { - // if there is data in b.head, copy it - if len(b.head.buf) > 0 { - r := copy(buf, b.head.buf) - buf, b.head.buf = buf[r:], b.head.buf[r:] - n += r - continue - } - // if there is a next buffer, make it the head - if len(b.head.buf) == 0 && b.head != b.tail { - b.head = b.head.next - continue - } - - // if at least one byte has been copied, return - if n > 0 { - break - } - - // if nothing was read, and there is nothing outstanding - // check to see if the buffer is closed. - if b.closed { - err = io.EOF - break - } - // out of buffers, wait for producer - b.Cond.Wait() - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go deleted file mode 100644 index fc04d03e..00000000 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ /dev/null @@ -1,589 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "sort" - "time" -) - -// Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear -// in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms. -// Unlike key algorithm names, these are not passed to AlgorithmSigner and don't -// appear in the Signature.Format field. -const ( - CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" - CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" - CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" - CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" - CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" - CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" - - // CertAlgoRSASHA256v01 and CertAlgoRSASHA512v01 can't appear as a - // Certificate.Type (or PublicKey.Type), but only in - // ClientConfig.HostKeyAlgorithms. - CertAlgoRSASHA256v01 = "rsa-sha2-256-cert-v01@openssh.com" - CertAlgoRSASHA512v01 = "rsa-sha2-512-cert-v01@openssh.com" -) - -const ( - // Deprecated: use CertAlgoRSAv01. - CertSigAlgoRSAv01 = CertAlgoRSAv01 - // Deprecated: use CertAlgoRSASHA256v01. - CertSigAlgoRSASHA2256v01 = CertAlgoRSASHA256v01 - // Deprecated: use CertAlgoRSASHA512v01. - CertSigAlgoRSASHA2512v01 = CertAlgoRSASHA512v01 -) - -// Certificate types distinguish between host and user -// certificates. The values can be set in the CertType field of -// Certificate. -const ( - UserCert = 1 - HostCert = 2 -) - -// Signature represents a cryptographic signature. -type Signature struct { - Format string - Blob []byte - Rest []byte `ssh:"rest"` -} - -// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that -// a certificate does not expire. -const CertTimeInfinity = 1<<64 - 1 - -// An Certificate represents an OpenSSH certificate as defined in -// [PROTOCOL.certkeys]?rev=1.8. The Certificate type implements the -// PublicKey interface, so it can be unmarshaled using -// ParsePublicKey. 
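A sketch of recovering a Certificate from key material (not part of the original file): ParseAuthorizedKey handles the textual authorized_keys form, while the ParsePublicKey route mentioned above takes the binary wire form. certLine is an assumed []byte holding one such line.

    pub, _, _, _, err := ssh.ParseAuthorizedKey(certLine)
    if err != nil {
        log.Fatal(err)
    }
    cert, ok := pub.(*ssh.Certificate)
    if !ok {
        log.Fatal("key is not a certificate")
    }
    fmt.Println(cert.KeyId, cert.ValidPrincipals)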
-type Certificate struct { - Nonce []byte - Key PublicKey - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []string - ValidAfter uint64 - ValidBefore uint64 - Permissions - Reserved []byte - SignatureKey PublicKey - Signature *Signature -} - -// genericCertData holds the key-independent part of the certificate data. -// Overall, certificates contain an nonce, public key fields and -// key-independent fields. -type genericCertData struct { - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []byte - ValidAfter uint64 - ValidBefore uint64 - CriticalOptions []byte - Extensions []byte - Reserved []byte - SignatureKey []byte - Signature []byte -} - -func marshalStringList(namelist []string) []byte { - var to []byte - for _, name := range namelist { - s := struct{ N string }{name} - to = append(to, Marshal(&s)...) - } - return to -} - -type optionsTuple struct { - Key string - Value []byte -} - -type optionsTupleValue struct { - Value string -} - -// serialize a map of critical options or extensions -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty string value -func marshalTuples(tups map[string]string) []byte { - keys := make([]string, 0, len(tups)) - for key := range tups { - keys = append(keys, key) - } - sort.Strings(keys) - - var ret []byte - for _, key := range keys { - s := optionsTuple{Key: key} - if value := tups[key]; len(value) > 0 { - s.Value = Marshal(&optionsTupleValue{value}) - } - ret = append(ret, Marshal(&s)...) - } - return ret -} - -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty option value -func parseTuples(in []byte) (map[string]string, error) { - tups := map[string]string{} - var lastKey string - var haveLastKey bool - - for len(in) > 0 { - var key, val, extra []byte - var ok bool - - if key, in, ok = parseString(in); !ok { - return nil, errShortRead - } - keyStr := string(key) - // according to [PROTOCOL.certkeys], the names must be in - // lexical order. 
- if haveLastKey && keyStr <= lastKey { - return nil, fmt.Errorf("ssh: certificate options are not in lexical order") - } - lastKey, haveLastKey = keyStr, true - // the next field is a data field, which if non-empty has a string embedded - if val, in, ok = parseString(in); !ok { - return nil, errShortRead - } - if len(val) > 0 { - val, extra, ok = parseString(val) - if !ok { - return nil, errShortRead - } - if len(extra) > 0 { - return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") - } - tups[keyStr] = string(val) - } else { - tups[keyStr] = "" - } - } - return tups, nil -} - -func parseCert(in []byte, privAlgo string) (*Certificate, error) { - nonce, rest, ok := parseString(in) - if !ok { - return nil, errShortRead - } - - key, rest, err := parsePubKey(rest, privAlgo) - if err != nil { - return nil, err - } - - var g genericCertData - if err := Unmarshal(rest, &g); err != nil { - return nil, err - } - - c := &Certificate{ - Nonce: nonce, - Key: key, - Serial: g.Serial, - CertType: g.CertType, - KeyId: g.KeyId, - ValidAfter: g.ValidAfter, - ValidBefore: g.ValidBefore, - } - - for principals := g.ValidPrincipals; len(principals) > 0; { - principal, rest, ok := parseString(principals) - if !ok { - return nil, errShortRead - } - c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) - principals = rest - } - - c.CriticalOptions, err = parseTuples(g.CriticalOptions) - if err != nil { - return nil, err - } - c.Extensions, err = parseTuples(g.Extensions) - if err != nil { - return nil, err - } - c.Reserved = g.Reserved - k, err := ParsePublicKey(g.SignatureKey) - if err != nil { - return nil, err - } - - c.SignatureKey = k - c.Signature, rest, ok = parseSignatureBody(g.Signature) - if !ok || len(rest) > 0 { - return nil, errors.New("ssh: signature parse error") - } - - return c, nil -} - -type openSSHCertSigner struct { - pub *Certificate - signer Signer -} - -type algorithmOpenSSHCertSigner struct { - *openSSHCertSigner - algorithmSigner AlgorithmSigner -} - -// NewCertSigner returns a Signer that signs with the given Certificate, whose -// private key is held by signer. It returns an error if the public key in cert -// doesn't match the key used by signer. -func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { - if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) { - return nil, errors.New("ssh: signer and cert have different public key") - } - - if algorithmSigner, ok := signer.(AlgorithmSigner); ok { - return &algorithmOpenSSHCertSigner{ - &openSSHCertSigner{cert, signer}, algorithmSigner}, nil - } else { - return &openSSHCertSigner{cert, signer}, nil - } -} - -func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.signer.Sign(rand, data) -} - -func (s *openSSHCertSigner) PublicKey() PublicKey { - return s.pub -} - -func (s *algorithmOpenSSHCertSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - return s.algorithmSigner.SignWithAlgorithm(rand, data, algorithm) -} - -const sourceAddressCriticalOption = "source-address" - -// CertChecker does the work of verifying a certificate. Its methods -// can be plugged into ClientConfig.HostKeyCallback and -// ServerConfig.PublicKeyCallback. For the CertChecker to work, -// minimally, the IsAuthority callback should be set. -type CertChecker struct { - // SupportedCriticalOptions lists the CriticalOptions that the - // server application layer understands. 
These are only used - // for user certificates. - SupportedCriticalOptions []string - - // IsUserAuthority should return true if the key is recognized as an - // authority for the given user certificate. This allows for - // certificates to be signed by other certificates. This must be set - // if this CertChecker will be checking user certificates. - IsUserAuthority func(auth PublicKey) bool - - // IsHostAuthority should report whether the key is recognized as - // an authority for this host. This allows for certificates to be - // signed by other keys, and for those other keys to only be valid - // signers for particular hostnames. This must be set if this - // CertChecker will be checking host certificates. - IsHostAuthority func(auth PublicKey, address string) bool - - // Clock is used for verifying time stamps. If nil, time.Now - // is used. - Clock func() time.Time - - // UserKeyFallback is called when CertChecker.Authenticate encounters a - // public key that is not a certificate. It must implement validation - // of user keys or else, if nil, all such keys are rejected. - UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // HostKeyFallback is called when CertChecker.CheckHostKey encounters a - // public key that is not a certificate. It must implement host key - // validation or else, if nil, all such keys are rejected. - HostKeyFallback HostKeyCallback - - // IsRevoked is called for each certificate so that revocation checking - // can be implemented. It should return true if the given certificate - // is revoked and false otherwise. If nil, no certificates are - // considered to have been revoked. - IsRevoked func(cert *Certificate) bool -} - -// CheckHostKey checks a host key certificate. This method can be -// plugged into ClientConfig.HostKeyCallback. -func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { - cert, ok := key.(*Certificate) - if !ok { - if c.HostKeyFallback != nil { - return c.HostKeyFallback(addr, remote, key) - } - return errors.New("ssh: non-certificate host key") - } - if cert.CertType != HostCert { - return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) - } - if !c.IsHostAuthority(cert.SignatureKey, addr) { - return fmt.Errorf("ssh: no authorities for hostname: %v", addr) - } - - hostname, _, err := net.SplitHostPort(addr) - if err != nil { - return err - } - - // Pass hostname only as principal for host certificates (consistent with OpenSSH) - return c.CheckCert(hostname, cert) -} - -// Authenticate checks a user certificate. Authenticate can be used as -// a value for ServerConfig.PublicKeyCallback. -func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { - cert, ok := pubKey.(*Certificate) - if !ok { - if c.UserKeyFallback != nil { - return c.UserKeyFallback(conn, pubKey) - } - return nil, errors.New("ssh: normal key pairs not accepted") - } - - if cert.CertType != UserCert { - return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) - } - if !c.IsUserAuthority(cert.SignatureKey) { - return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") - } - - if err := c.CheckCert(conn.User(), cert); err != nil { - return nil, err - } - - return &cert.Permissions, nil -} - -// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and -// the signature of the certificate. 
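A minimal client-side sketch (not in the original file): caPub is an assumed ssh.PublicKey for the trusted authority, and host certificates it signed are accepted via CheckHostKey.

    checker := &ssh.CertChecker{
        IsHostAuthority: func(auth ssh.PublicKey, address string) bool {
            return bytes.Equal(auth.Marshal(), caPub.Marshal())
        },
    }
    config := &ssh.ClientConfig{
        User:            "alice",
        Auth:            []ssh.AuthMethod{ /* ... */ },
        HostKeyCallback: checker.CheckHostKey,
    }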
-func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { - if c.IsRevoked != nil && c.IsRevoked(cert) { - return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial) - } - - for opt := range cert.CriticalOptions { - // sourceAddressCriticalOption will be enforced by - // serverAuthenticate - if opt == sourceAddressCriticalOption { - continue - } - - found := false - for _, supp := range c.SupportedCriticalOptions { - if supp == opt { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) - } - } - - if len(cert.ValidPrincipals) > 0 { - // By default, certs are valid for all users/hosts. - found := false - for _, p := range cert.ValidPrincipals { - if p == principal { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) - } - } - - clock := c.Clock - if clock == nil { - clock = time.Now - } - - unixNow := clock().Unix() - if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { - return fmt.Errorf("ssh: cert is not yet valid") - } - if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { - return fmt.Errorf("ssh: cert has expired") - } - if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { - return fmt.Errorf("ssh: certificate signature does not verify") - } - - return nil -} - -// SignCert signs the certificate with an authority, setting the Nonce, -// SignatureKey, and Signature fields. -func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { - c.Nonce = make([]byte, 32) - if _, err := io.ReadFull(rand, c.Nonce); err != nil { - return err - } - c.SignatureKey = authority.PublicKey() - - // Default to KeyAlgoRSASHA512 for ssh-rsa signers. - if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA { - sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512) - if err != nil { - return err - } - c.Signature = sig - return nil - } - - sig, err := authority.Sign(rand, c.bytesForSigning()) - if err != nil { - return err - } - c.Signature = sig - return nil -} - -// certKeyAlgoNames is a mapping from known certificate algorithm names to the -// corresponding public key signature algorithm. -// -// This map must be kept in sync with the one in agent/client.go. -var certKeyAlgoNames = map[string]string{ - CertAlgoRSAv01: KeyAlgoRSA, - CertAlgoRSASHA256v01: KeyAlgoRSASHA256, - CertAlgoRSASHA512v01: KeyAlgoRSASHA512, - CertAlgoDSAv01: KeyAlgoDSA, - CertAlgoECDSA256v01: KeyAlgoECDSA256, - CertAlgoECDSA384v01: KeyAlgoECDSA384, - CertAlgoECDSA521v01: KeyAlgoECDSA521, - CertAlgoSKECDSA256v01: KeyAlgoSKECDSA256, - CertAlgoED25519v01: KeyAlgoED25519, - CertAlgoSKED25519v01: KeyAlgoSKED25519, -} - -// underlyingAlgo returns the signature algorithm associated with algo (which is -// an advertised or negotiated public key or host key algorithm). These are -// usually the same, except for certificate algorithms. -func underlyingAlgo(algo string) string { - if a, ok := certKeyAlgoNames[algo]; ok { - return a - } - return algo -} - -// certificateAlgo returns the certificate algorithms that uses the provided -// underlying signature algorithm. 
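A sketch of minting a user certificate with the fields and methods above; userPub (an ssh.PublicKey) and caSigner (an ssh.Signer) are assumed inputs, and rand.Reader comes from crypto/rand.

    cert := &ssh.Certificate{
        Key:             userPub,
        Serial:          1,
        CertType:        ssh.UserCert,
        KeyId:           "alice@example.com",
        ValidPrincipals: []string{"alice"},
        ValidAfter:      uint64(time.Now().Add(-time.Minute).Unix()),
        ValidBefore:     uint64(time.Now().Add(24 * time.Hour).Unix()),
    }
    if err := cert.SignCert(rand.Reader, caSigner); err != nil {
        log.Fatal(err)
    }
    // Certificate implements PublicKey, so the result can be written out
    // in authorized_keys form.
    line := ssh.MarshalAuthorizedKey(cert)
    _ = line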
-func certificateAlgo(algo string) (certAlgo string, ok bool) { - for certName, algoName := range certKeyAlgoNames { - if algoName == algo { - return certName, true - } - } - return "", false -} - -func (cert *Certificate) bytesForSigning() []byte { - c2 := *cert - c2.Signature = nil - out := c2.Marshal() - // Drop trailing signature length. - return out[:len(out)-4] -} - -// Marshal serializes c into OpenSSH's wire format. It is part of the -// PublicKey interface. -func (c *Certificate) Marshal() []byte { - generic := genericCertData{ - Serial: c.Serial, - CertType: c.CertType, - KeyId: c.KeyId, - ValidPrincipals: marshalStringList(c.ValidPrincipals), - ValidAfter: uint64(c.ValidAfter), - ValidBefore: uint64(c.ValidBefore), - CriticalOptions: marshalTuples(c.CriticalOptions), - Extensions: marshalTuples(c.Extensions), - Reserved: c.Reserved, - SignatureKey: c.SignatureKey.Marshal(), - } - if c.Signature != nil { - generic.Signature = Marshal(c.Signature) - } - genericBytes := Marshal(&generic) - keyBytes := c.Key.Marshal() - _, keyBytes, _ = parseString(keyBytes) - prefix := Marshal(&struct { - Name string - Nonce []byte - Key []byte `ssh:"rest"` - }{c.Type(), c.Nonce, keyBytes}) - - result := make([]byte, 0, len(prefix)+len(genericBytes)) - result = append(result, prefix...) - result = append(result, genericBytes...) - return result -} - -// Type returns the certificate algorithm name. It is part of the PublicKey interface. -func (c *Certificate) Type() string { - certName, ok := certificateAlgo(c.Key.Type()) - if !ok { - panic("unknown certificate type for key type " + c.Key.Type()) - } - return certName -} - -// Verify verifies a signature against the certificate's public -// key. It is part of the PublicKey interface. -func (c *Certificate) Verify(data []byte, sig *Signature) error { - return c.Key.Verify(data, sig) -} - -func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { - format, in, ok := parseString(in) - if !ok { - return - } - - out = &Signature{ - Format: string(format), - } - - if out.Blob, in, ok = parseString(in); !ok { - return - } - - switch out.Format { - case KeyAlgoSKECDSA256, CertAlgoSKECDSA256v01, KeyAlgoSKED25519, CertAlgoSKED25519v01: - out.Rest = in - return out, nil, ok - } - - return out, in, ok -} - -func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { - sigBytes, rest, ok := parseString(in) - if !ok { - return - } - - out, trailing, ok := parseSignatureBody(sigBytes) - if !ok || len(trailing) > 0 { - return nil, nil, false - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go deleted file mode 100644 index c0834c00..00000000 --- a/vendor/golang.org/x/crypto/ssh/channel.go +++ /dev/null @@ -1,633 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "sync" -) - -const ( - minPacketLength = 9 - // channelMaxPacket contains the maximum number of bytes that will be - // sent in a single packet. As per RFC 4253, section 6.1, 32k is also - // the minimum. - channelMaxPacket = 1 << 15 - // We follow OpenSSH here. - channelWindowSize = 64 * channelMaxPacket -) - -// NewChannel represents an incoming request to a channel. It must either be -// accepted for use by calling Accept, or rejected by calling Reject. 
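A typical server-side loop over the interface defined below (a sketch, not from the original file); chans is the <-chan ssh.NewChannel returned by ssh.NewServerConn.

    for newCh := range chans {
        if newCh.ChannelType() != "session" {
            newCh.Reject(ssh.UnknownChannelType, "only sessions are supported")
            continue
        }
        ch, reqs, err := newCh.Accept()
        if err != nil {
            log.Print(err)
            continue
        }
        go ssh.DiscardRequests(reqs) // the request channel must be drained
        go func() {
            defer ch.Close()
            io.Copy(ch, ch) // trivial echo handler
        }()
    }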
-type NewChannel interface { - // Accept accepts the channel creation request. It returns the Channel - // and a Go channel containing SSH requests. The Go channel must be - // serviced otherwise the Channel will hang. - Accept() (Channel, <-chan *Request, error) - - // Reject rejects the channel creation request. After calling - // this, no other methods on the Channel may be called. - Reject(reason RejectionReason, message string) error - - // ChannelType returns the type of the channel, as supplied by the - // client. - ChannelType() string - - // ExtraData returns the arbitrary payload for this channel, as supplied - // by the client. This data is specific to the channel type. - ExtraData() []byte -} - -// A Channel is an ordered, reliable, flow-controlled, duplex stream -// that is multiplexed over an SSH connection. -type Channel interface { - // Read reads up to len(data) bytes from the channel. - Read(data []byte) (int, error) - - // Write writes len(data) bytes to the channel. - Write(data []byte) (int, error) - - // Close signals end of channel use. No data may be sent after this - // call. - Close() error - - // CloseWrite signals the end of sending in-band - // data. Requests may still be sent, and the other side may - // still send data - CloseWrite() error - - // SendRequest sends a channel request. If wantReply is true, - // it will wait for a reply and return the result as a - // boolean, otherwise the return value will be false. Channel - // requests are out-of-band messages so they may be sent even - // if the data stream is closed or blocked by flow control. - // If the channel is closed before a reply is returned, io.EOF - // is returned. - SendRequest(name string, wantReply bool, payload []byte) (bool, error) - - // Stderr returns an io.ReadWriter that writes to this channel - // with the extended data type set to stderr. Stderr may - // safely be read and written from a different goroutine than - // Read and Write respectively. - Stderr() io.ReadWriter -} - -// Request is a request sent outside of the normal stream of -// data. Requests can either be specific to an SSH channel, or they -// can be global. -type Request struct { - Type string - WantReply bool - Payload []byte - - ch *channel - mux *mux -} - -// Reply sends a response to a request. It must be called for all requests -// where WantReply is true and is a no-op otherwise. The payload argument is -// ignored for replies to channel-specific requests. -func (r *Request) Reply(ok bool, payload []byte) error { - if !r.WantReply { - return nil - } - - if r.ch == nil { - return r.mux.ackRequest(ok, payload) - } - - return r.ch.ackRequest(ok) -} - -// RejectionReason is an enumeration used when rejecting channel creation -// requests. See RFC 4254, section 5.1. -type RejectionReason uint32 - -const ( - Prohibited RejectionReason = iota + 1 - ConnectionFailed - UnknownChannelType - ResourceShortage -) - -// String converts the rejection reason to human readable form. 
-func (r RejectionReason) String() string { - switch r { - case Prohibited: - return "administratively prohibited" - case ConnectionFailed: - return "connect failed" - case UnknownChannelType: - return "unknown channel type" - case ResourceShortage: - return "resource shortage" - } - return fmt.Sprintf("unknown reason %d", int(r)) -} - -func min(a uint32, b int) uint32 { - if a < uint32(b) { - return a - } - return uint32(b) -} - -type channelDirection uint8 - -const ( - channelInbound channelDirection = iota - channelOutbound -) - -// channel is an implementation of the Channel interface that works -// with the mux class. -type channel struct { - // R/O after creation - chanType string - extraData []byte - localId, remoteId uint32 - - // maxIncomingPayload and maxRemotePayload are the maximum - // payload sizes of normal and extended data packets for - // receiving and sending, respectively. The wire packet will - // be 9 or 13 bytes larger (excluding encryption overhead). - maxIncomingPayload uint32 - maxRemotePayload uint32 - - mux *mux - - // decided is set to true if an accept or reject message has been sent - // (for outbound channels) or received (for inbound channels). - decided bool - - // direction contains either channelOutbound, for channels created - // locally, or channelInbound, for channels created by the peer. - direction channelDirection - - // Pending internal channel messages. - msg chan interface{} - - // Since requests have no ID, there can be only one request - // with WantReply=true outstanding. This lock is held by a - // goroutine that has such an outgoing request pending. - sentRequestMu sync.Mutex - - incomingRequests chan *Request - - sentEOF bool - - // thread-safe data - remoteWin window - pending *buffer - extPending *buffer - - // windowMu protects myWindow, the flow-control window. - windowMu sync.Mutex - myWindow uint32 - - // writeMu serializes calls to mux.conn.writePacket() and - // protects sentClose and packetPool. This mutex must be - // different from windowMu, as writePacket can block if there - // is a key exchange pending. - writeMu sync.Mutex - sentClose bool - - // packetPool has a buffer for each extended channel ID to - // save allocations during writes. - packetPool map[uint32][]byte -} - -// writePacket sends a packet. If the packet is a channel close, it updates -// sentClose. This method takes the lock c.writeMu. -func (ch *channel) writePacket(packet []byte) error { - ch.writeMu.Lock() - if ch.sentClose { - ch.writeMu.Unlock() - return io.EOF - } - ch.sentClose = (packet[0] == msgChannelClose) - err := ch.mux.conn.writePacket(packet) - ch.writeMu.Unlock() - return err -} - -func (ch *channel) sendMessage(msg interface{}) error { - if debugMux { - log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg) - } - - p := Marshal(msg) - binary.BigEndian.PutUint32(p[1:], ch.remoteId) - return ch.writePacket(p) -} - -// WriteExtended writes data to a specific extended stream. These streams are -// used, for example, for stderr. 
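A two-line sketch of the distinction, assuming ch is an accepted ssh.Channel: Write targets the main stream (extended code 0), while Stderr wraps extended stream 1.

    fmt.Fprintln(ch, "normal output")        // main data stream
    fmt.Fprintln(ch.Stderr(), "diagnostics") // extended stream 1 (stderr)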
-func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { - if ch.sentEOF { - return 0, io.EOF - } - // 1 byte message type, 4 bytes remoteId, 4 bytes data length - opCode := byte(msgChannelData) - headerLength := uint32(9) - if extendedCode > 0 { - headerLength += 4 - opCode = msgChannelExtendedData - } - - ch.writeMu.Lock() - packet := ch.packetPool[extendedCode] - // We don't remove the buffer from packetPool, so - // WriteExtended calls from different goroutines will be - // flagged as errors by the race detector. - ch.writeMu.Unlock() - - for len(data) > 0 { - space := min(ch.maxRemotePayload, len(data)) - if space, err = ch.remoteWin.reserve(space); err != nil { - return n, err - } - if want := headerLength + space; uint32(cap(packet)) < want { - packet = make([]byte, want) - } else { - packet = packet[:want] - } - - todo := data[:space] - - packet[0] = opCode - binary.BigEndian.PutUint32(packet[1:], ch.remoteId) - if extendedCode > 0 { - binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) - } - binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) - copy(packet[headerLength:], todo) - if err = ch.writePacket(packet); err != nil { - return n, err - } - - n += len(todo) - data = data[len(todo):] - } - - ch.writeMu.Lock() - ch.packetPool[extendedCode] = packet - ch.writeMu.Unlock() - - return n, err -} - -func (ch *channel) handleData(packet []byte) error { - headerLen := 9 - isExtendedData := packet[0] == msgChannelExtendedData - if isExtendedData { - headerLen = 13 - } - if len(packet) < headerLen { - // malformed data packet - return parseError(packet[0]) - } - - var extended uint32 - if isExtendedData { - extended = binary.BigEndian.Uint32(packet[5:]) - } - - length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) - if length == 0 { - return nil - } - if length > ch.maxIncomingPayload { - // TODO(hanwen): should send Disconnect? - return errors.New("ssh: incoming packet exceeds maximum payload size") - } - - data := packet[headerLen:] - if length != uint32(len(data)) { - return errors.New("ssh: wrong packet length") - } - - ch.windowMu.Lock() - if ch.myWindow < length { - ch.windowMu.Unlock() - // TODO(hanwen): should send Disconnect with reason? - return errors.New("ssh: remote side wrote too much") - } - ch.myWindow -= length - ch.windowMu.Unlock() - - if extended == 1 { - ch.extPending.write(data) - } else if extended > 0 { - // discard other extended data. - } else { - ch.pending.write(data) - } - return nil -} - -func (c *channel) adjustWindow(n uint32) error { - c.windowMu.Lock() - // Since myWindow is managed on our side, and can never exceed - // the initial window setting, we don't worry about overflow. - c.myWindow += uint32(n) - c.windowMu.Unlock() - return c.sendMessage(windowAdjustMsg{ - AdditionalBytes: uint32(n), - }) -} - -func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { - switch extended { - case 1: - n, err = c.extPending.Read(data) - case 0: - n, err = c.pending.Read(data) - default: - return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) - } - - if n > 0 { - err = c.adjustWindow(uint32(n)) - // sendWindowAdjust can return io.EOF if the remote - // peer has closed the connection, however we want to - // defer forwarding io.EOF to the caller of Read until - // the buffer has been drained. 
- if n > 0 && err == io.EOF { - err = nil - } - } - - return n, err -} - -func (c *channel) close() { - c.pending.eof() - c.extPending.eof() - close(c.msg) - close(c.incomingRequests) - c.writeMu.Lock() - // This is not necessary for a normal channel teardown, but if - // there was another error, it is. - c.sentClose = true - c.writeMu.Unlock() - // Unblock writers. - c.remoteWin.close() -} - -// responseMessageReceived is called when a success or failure message is -// received on a channel to check that such a message is reasonable for the -// given channel. -func (ch *channel) responseMessageReceived() error { - if ch.direction == channelInbound { - return errors.New("ssh: channel response message received on inbound channel") - } - if ch.decided { - return errors.New("ssh: duplicate response received for channel") - } - ch.decided = true - return nil -} - -func (ch *channel) handlePacket(packet []byte) error { - switch packet[0] { - case msgChannelData, msgChannelExtendedData: - return ch.handleData(packet) - case msgChannelClose: - ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId}) - ch.mux.chanList.remove(ch.localId) - ch.close() - return nil - case msgChannelEOF: - // RFC 4254 is mute on how EOF affects dataExt messages but - // it is logical to signal EOF at the same time. - ch.extPending.eof() - ch.pending.eof() - return nil - } - - decoded, err := decode(packet) - if err != nil { - return err - } - - switch msg := decoded.(type) { - case *channelOpenFailureMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - ch.mux.chanList.remove(msg.PeersID) - ch.msg <- msg - case *channelOpenConfirmMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) - } - ch.remoteId = msg.MyID - ch.maxRemotePayload = msg.MaxPacketSize - ch.remoteWin.add(msg.MyWindow) - ch.msg <- msg - case *windowAdjustMsg: - if !ch.remoteWin.add(msg.AdditionalBytes) { - return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) - } - case *channelRequestMsg: - req := Request{ - Type: msg.Request, - WantReply: msg.WantReply, - Payload: msg.RequestSpecificData, - ch: ch, - } - - ch.incomingRequests <- &req - default: - ch.msg <- msg - } - return nil -} - -func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { - ch := &channel{ - remoteWin: window{Cond: newCond()}, - myWindow: channelWindowSize, - pending: newBuffer(), - extPending: newBuffer(), - direction: direction, - incomingRequests: make(chan *Request, chanSize), - msg: make(chan interface{}, chanSize), - chanType: chanType, - extraData: extraData, - mux: m, - packetPool: make(map[uint32][]byte), - } - ch.localId = m.chanList.add(ch) - return ch -} - -var errUndecided = errors.New("ssh: must Accept or Reject channel") -var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") - -type extChannel struct { - code uint32 - ch *channel -} - -func (e *extChannel) Write(data []byte) (n int, err error) { - return e.ch.WriteExtended(data, e.code) -} - -func (e *extChannel) Read(data []byte) (n int, err error) { - return e.ch.ReadExtended(data, e.code) -} - -func (ch *channel) Accept() (Channel, <-chan *Request, error) { - if ch.decided { - return nil, nil, errDecidedAlready - } - ch.maxIncomingPayload = channelMaxPacket - confirm := channelOpenConfirmMsg{ - PeersID: ch.remoteId, - MyID: 
ch.localId, - MyWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - } - ch.decided = true - if err := ch.sendMessage(confirm); err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (ch *channel) Reject(reason RejectionReason, message string) error { - if ch.decided { - return errDecidedAlready - } - reject := channelOpenFailureMsg{ - PeersID: ch.remoteId, - Reason: reason, - Message: message, - Language: "en", - } - ch.decided = true - return ch.sendMessage(reject) -} - -func (ch *channel) Read(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.ReadExtended(data, 0) -} - -func (ch *channel) Write(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.WriteExtended(data, 0) -} - -func (ch *channel) CloseWrite() error { - if !ch.decided { - return errUndecided - } - ch.sentEOF = true - return ch.sendMessage(channelEOFMsg{ - PeersID: ch.remoteId}) -} - -func (ch *channel) Close() error { - if !ch.decided { - return errUndecided - } - - return ch.sendMessage(channelCloseMsg{ - PeersID: ch.remoteId}) -} - -// Extended returns an io.ReadWriter that sends and receives data on the given, -// SSH extended stream. Such streams are used, for example, for stderr. -func (ch *channel) Extended(code uint32) io.ReadWriter { - if !ch.decided { - return nil - } - return &extChannel{code, ch} -} - -func (ch *channel) Stderr() io.ReadWriter { - return ch.Extended(1) -} - -func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - if !ch.decided { - return false, errUndecided - } - - if wantReply { - ch.sentRequestMu.Lock() - defer ch.sentRequestMu.Unlock() - } - - msg := channelRequestMsg{ - PeersID: ch.remoteId, - Request: name, - WantReply: wantReply, - RequestSpecificData: payload, - } - - if err := ch.sendMessage(msg); err != nil { - return false, err - } - - if wantReply { - m, ok := (<-ch.msg) - if !ok { - return false, io.EOF - } - switch m.(type) { - case *channelRequestFailureMsg: - return false, nil - case *channelRequestSuccessMsg: - return true, nil - default: - return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) - } - } - - return false, nil -} - -// ackRequest either sends an ack or nack to the channel request. -func (ch *channel) ackRequest(ok bool) error { - if !ch.decided { - return errUndecided - } - - var msg interface{} - if !ok { - msg = channelRequestFailureMsg{ - PeersID: ch.remoteId, - } - } else { - msg = channelRequestSuccessMsg{ - PeersID: ch.remoteId, - } - } - return ch.sendMessage(msg) -} - -func (ch *channel) ChannelType() string { - return ch.chanType -} - -func (ch *channel) ExtraData() []byte { - return ch.extraData -} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go deleted file mode 100644 index 87f48552..00000000 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ /dev/null @@ -1,788 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rc4" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "hash" - "io" - - "golang.org/x/crypto/chacha20" - "golang.org/x/crypto/internal/poly1305" -) - -const ( - packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. 
- - // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations - // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC - // indicates implementations SHOULD be able to handle larger packet sizes, but then - // waffles on about reasonable limits. - // - // OpenSSH caps their maxPacket at 256kB so we choose to do - // the same. maxPacket is also used to ensure that uint32 - // length fields do not overflow, so it should remain well - // below 4G. - maxPacket = 256 * 1024 -) - -// noneCipher implements cipher.Stream and provides no encryption. It is used -// by the transport before the first key-exchange. -type noneCipher struct{} - -func (c noneCipher) XORKeyStream(dst, src []byte) { - copy(dst, src) -} - -func newAESCTR(key, iv []byte) (cipher.Stream, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - return cipher.NewCTR(c, iv), nil -} - -func newRC4(key, iv []byte) (cipher.Stream, error) { - return rc4.NewCipher(key) -} - -type cipherMode struct { - keySize int - ivSize int - create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) -} - -func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - stream, err := createFunc(key, iv) - if err != nil { - return nil, err - } - - var streamDump []byte - if skip > 0 { - streamDump = make([]byte, 512) - } - - for remainingToDump := skip; remainingToDump > 0; { - dumpThisTime := remainingToDump - if dumpThisTime > len(streamDump) { - dumpThisTime = len(streamDump) - } - stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) - remainingToDump -= dumpThisTime - } - - mac := macModes[algs.MAC].new(macKey) - return &streamPacketCipher{ - mac: mac, - etm: macModes[algs.MAC].etm, - macResult: make([]byte, mac.Size()), - cipher: stream, - }, nil - } -} - -// cipherModes documents properties of supported ciphers. Ciphers not included -// are not supported and will not be negotiated, even if explicitly requested in -// ClientConfig.Crypto.Ciphers. -var cipherModes = map[string]*cipherMode{ - // Ciphers from RFC 4344, which introduced many CTR-based ciphers. Algorithms - // are defined in the order specified in the RFC. - "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - - // Ciphers from RFC 4345, which introduces security-improved arcfour ciphers. - // They are defined in the order specified in the RFC. - "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, - "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, - - // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. - // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and - // RC4) has problems with weak keys, and should be used with caution." - // RFC 4345 introduces improved versions of Arcfour. - "arcfour": {16, 0, streamCipherMode(0, newRC4)}, - - // AEAD ciphers - gcmCipherID: {16, 12, newGCMCipher}, - chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, - - // CBC mode is insecure and so is not included in the default config. - // (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). 
If absolutely - // needed, it's possible to specify a custom Config to enable it. - // You should expect that an active attacker can recover plaintext if - // you do. - aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, - - // 3des-cbc is insecure and is not included in the default - // config. - tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher}, -} - -// prefixLen is the length of the packet prefix that contains the packet length -// and number of padding bytes. -const prefixLen = 5 - -// streamPacketCipher is a packetCipher using a stream cipher. -type streamPacketCipher struct { - mac hash.Hash - cipher cipher.Stream - etm bool - - // The following members are to avoid per-packet allocations. - prefix [prefixLen]byte - seqNumBytes [4]byte - padding [2 * packetSizeMultiple]byte - packetData []byte - macResult []byte -} - -// readCipherPacket reads and decrypt a single packet from the reader argument. -func (s *streamPacketCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, s.prefix[:]); err != nil { - return nil, err - } - - var encryptedPaddingLength [1]byte - if s.mac != nil && s.etm { - copy(encryptedPaddingLength[:], s.prefix[4:5]) - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } else { - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - length := binary.BigEndian.Uint32(s.prefix[0:4]) - paddingLength := uint32(s.prefix[4]) - - var macSize uint32 - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - if s.etm { - s.mac.Write(s.prefix[:4]) - s.mac.Write(encryptedPaddingLength[:]) - } else { - s.mac.Write(s.prefix[:]) - } - macSize = uint32(s.mac.Size()) - } - - if length <= paddingLength+1 { - return nil, errors.New("ssh: invalid packet length, packet too small") - } - - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - // the maxPacket check above ensures that length-1+macSize - // does not overflow. 
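// To spell the bound out: length <= maxPacket = 256 KiB and macSize is at
// most a few dozen bytes for every supported MAC, so length-1+macSize stays
// far below the uint32 limit.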
- if uint32(cap(s.packetData)) < length-1+macSize { - s.packetData = make([]byte, length-1+macSize) - } else { - s.packetData = s.packetData[:length-1+macSize] - } - - if _, err := io.ReadFull(r, s.packetData); err != nil { - return nil, err - } - mac := s.packetData[length-1:] - data := s.packetData[:length-1] - - if s.mac != nil && s.etm { - s.mac.Write(data) - } - - s.cipher.XORKeyStream(data, data) - - if s.mac != nil { - if !s.etm { - s.mac.Write(data) - } - s.macResult = s.mac.Sum(s.macResult[:0]) - if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { - return nil, errors.New("ssh: MAC failure") - } - } - - return s.packetData[:length-paddingLength-1], nil -} - -// writeCipherPacket encrypts and sends a packet of data to the writer argument -func (s *streamPacketCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - if len(packet) > maxPacket { - return errors.New("ssh: packet too large") - } - - aadlen := 0 - if s.mac != nil && s.etm { - // packet length is not encrypted for EtM modes - aadlen = 4 - } - - paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple - if paddingLength < 4 { - paddingLength += packetSizeMultiple - } - - length := len(packet) + 1 + paddingLength - binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) - s.prefix[4] = byte(paddingLength) - padding := s.padding[:paddingLength] - if _, err := io.ReadFull(rand, padding); err != nil { - return err - } - - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - - if s.etm { - // For EtM algorithms, the packet length must stay unencrypted, - // but the following data (padding length) must be encrypted - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } - - s.mac.Write(s.prefix[:]) - - if !s.etm { - // For non-EtM algorithms, the algorithm is applied on unencrypted data - s.mac.Write(packet) - s.mac.Write(padding) - } - } - - if !(s.mac != nil && s.etm) { - // For EtM algorithms, the padding length has already been encrypted - // and the packet length must remain unencrypted - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - s.cipher.XORKeyStream(packet, packet) - s.cipher.XORKeyStream(padding, padding) - - if s.mac != nil && s.etm { - // For EtM algorithms, packet and padding must be encrypted - s.mac.Write(packet) - s.mac.Write(padding) - } - - if _, err := w.Write(s.prefix[:]); err != nil { - return err - } - if _, err := w.Write(packet); err != nil { - return err - } - if _, err := w.Write(padding); err != nil { - return err - } - - if s.mac != nil { - s.macResult = s.mac.Sum(s.macResult[:0]) - if _, err := w.Write(s.macResult); err != nil { - return err - } - } - - return nil -} - -type gcmCipher struct { - aead cipher.AEAD - prefix [4]byte - iv []byte - buf []byte -} - -func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - aead, err := cipher.NewGCM(c) - if err != nil { - return nil, err - } - - return &gcmCipher{ - aead: aead, - iv: iv, - }, nil -} - -const gcmTagSize = 16 - -func (c *gcmCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - // Pad out to multiple of 16 bytes. This is different from the - // stream cipher because that encrypts the length too. 
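// Worked example: a 28-byte payload gives (1+28)%16 == 13, so the computed
// padding is 3; that is under the 4-byte minimum, so it grows to 19 and the
// sealed body (padding-length byte + payload + padding) is 1+28+19 = 48
// bytes, a multiple of the 16-byte block size.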
- padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) - if padding < 4 { - padding += packetSizeMultiple - } - - length := uint32(len(packet) + int(padding) + 1) - binary.BigEndian.PutUint32(c.prefix[:], length) - if _, err := w.Write(c.prefix[:]); err != nil { - return err - } - - if cap(c.buf) < int(length) { - c.buf = make([]byte, length) - } else { - c.buf = c.buf[:length] - } - - c.buf[0] = padding - copy(c.buf[1:], packet) - if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { - return err - } - c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if _, err := w.Write(c.buf); err != nil { - return err - } - c.incIV() - - return nil -} - -func (c *gcmCipher) incIV() { - for i := 4 + 7; i >= 4; i-- { - c.iv[i]++ - if c.iv[i] != 0 { - break - } - } -} - -func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, c.prefix[:]); err != nil { - return nil, err - } - length := binary.BigEndian.Uint32(c.prefix[:]) - if length > maxPacket { - return nil, errors.New("ssh: max packet length exceeded") - } - - if cap(c.buf) < int(length+gcmTagSize) { - c.buf = make([]byte, length+gcmTagSize) - } else { - c.buf = c.buf[:length+gcmTagSize] - } - - if _, err := io.ReadFull(r, c.buf); err != nil { - return nil, err - } - - plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if err != nil { - return nil, err - } - c.incIV() - - if len(plain) == 0 { - return nil, errors.New("ssh: empty packet") - } - - padding := plain[0] - if padding < 4 { - // padding is a byte, so it automatically satisfies - // the maximum size, which is 255. - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding+1) >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - plain = plain[1 : length-uint32(padding)] - return plain, nil -} - -// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 -type cbcCipher struct { - mac hash.Hash - macSize uint32 - decrypter cipher.BlockMode - encrypter cipher.BlockMode - - // The following members are to avoid per-packet allocations. - seqNumBytes [4]byte - packetData []byte - macResult []byte - - // Amount of data we should still read to hide which - // verification error triggered. 
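The `incIV` method above follows the RFC 5647 nonce layout for AES-GCM in SSH: a 4-byte fixed field followed by an 8-byte big-endian invocation counter that is incremented once per packet. An isolated sketch of that carry-propagating increment:

```go
package main

import "fmt"

// incIV treats the 12-byte AES-GCM nonce as a 4-byte fixed field followed by
// an 8-byte big-endian invocation counter (RFC 5647) and increments the
// counter with carry, leaving the fixed field untouched.
func incIV(iv []byte) {
	for i := 4 + 7; i >= 4; i-- {
		iv[i]++
		if iv[i] != 0 {
			break
		}
	}
}

func main() {
	iv := []byte{1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 255}
	incIV(iv)
	fmt.Println(iv) // [1 2 3 4 0 0 0 0 0 0 1 0]: the carry propagates one byte
}
```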
- oracleCamouflage uint32 -} - -func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - cbc := &cbcCipher{ - mac: macModes[algs.MAC].new(macKey), - decrypter: cipher.NewCBCDecrypter(c, iv), - encrypter: cipher.NewCBCEncrypter(c, iv), - packetData: make([]byte, 1024), - } - if cbc.mac != nil { - cbc.macSize = uint32(cbc.mac.Size()) - } - - return cbc, nil -} - -func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := des.NewTripleDESCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func maxUInt32(a, b int) uint32 { - if a > b { - return uint32(a) - } - return uint32(b) -} - -const ( - cbcMinPacketSizeMultiple = 8 - cbcMinPacketSize = 16 - cbcMinPaddingSize = 4 -) - -// cbcError represents a verification error that may leak information. -type cbcError string - -func (e cbcError) Error() string { return string(e) } - -func (c *cbcCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - p, err := c.readCipherPacketLeaky(seqNum, r) - if err != nil { - if _, ok := err.(cbcError); ok { - // Verification error: read a fixed amount of - // data, to make distinguishing between - // failing MAC and failing length check more - // difficult. - io.CopyN(io.Discard, r, int64(c.oracleCamouflage)) - } - } - return p, err -} - -func (c *cbcCipher) readCipherPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { - blockSize := c.decrypter.BlockSize() - - // Read the header, which will include some of the subsequent data in the - // case of block ciphers - this is copied back to the payload later. - // How many bytes of payload/padding will be read with this first read. - firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) - firstBlock := c.packetData[:firstBlockLength] - if _, err := io.ReadFull(r, firstBlock); err != nil { - return nil, err - } - - c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength - - c.decrypter.CryptBlocks(firstBlock, firstBlock) - length := binary.BigEndian.Uint32(firstBlock[:4]) - if length > maxPacket { - return nil, cbcError("ssh: packet too large") - } - if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { - // The minimum size of a packet is 16 (or the cipher block size, whichever - // is larger) bytes. - return nil, cbcError("ssh: packet too small") - } - // The length of the packet (including the length field but not the MAC) must - // be a multiple of the block size or 8, whichever is larger. - if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { - return nil, cbcError("ssh: invalid packet length multiple") - } - - paddingLength := uint32(firstBlock[4]) - if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { - return nil, cbcError("ssh: invalid packet length") - } - - // Positions within the c.packetData buffer: - macStart := 4 + length - paddingStart := macStart - paddingLength - - // Entire packet size, starting before length, ending at end of mac. - entirePacketSize := macStart + c.macSize - - // Ensure c.packetData is large enough for the entire packet data. 
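The CBC read path enforces three structural rules before touching the MAC, and on any failure it reads `oracleCamouflage` extra bytes so that a length failure and a MAC failure consume the same amount of input. A sketch of just the length validation (standalone, with `maxPacket` assumed to match the package limit):

```go
package main

import (
	"errors"
	"fmt"
)

const (
	maxPacket                = 256 * 1024 // assumed to match the package limit
	cbcMinPacketSizeMultiple = 8
	cbcMinPacketSize         = 16
	cbcMinPaddingSize        = 4
)

func maxU32(a, b uint32) uint32 {
	if a > b {
		return a
	}
	return b
}

// validateCBCLength mirrors the checks in readCipherPacketLeaky: the whole
// packet including the 4-byte length field must be a multiple of
// max(8, blockSize), at least max(16, blockSize) bytes long, and must carry
// at least 4 bytes of padding while leaving some payload.
func validateCBCLength(length, paddingLength, blockSize uint32) error {
	if length > maxPacket {
		return errors.New("ssh: packet too large")
	}
	if length+4 < maxU32(cbcMinPacketSize, blockSize) {
		return errors.New("ssh: packet too small")
	}
	if (length+4)%maxU32(cbcMinPacketSizeMultiple, blockSize) != 0 {
		return errors.New("ssh: invalid packet length multiple")
	}
	if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 {
		return errors.New("ssh: invalid packet length")
	}
	return nil
}

func main() {
	fmt.Println(validateCBCLength(28, 10, 16)) // <nil>: 32 bytes, two AES blocks
	fmt.Println(validateCBCLength(29, 10, 16)) // invalid packet length multiple
}
```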
- if uint32(cap(c.packetData)) < entirePacketSize { - // Still need to upsize and copy, but this should be rare at runtime, only - // on upsizing the packetData buffer. - c.packetData = make([]byte, entirePacketSize) - copy(c.packetData, firstBlock) - } else { - c.packetData = c.packetData[:entirePacketSize] - } - - n, err := io.ReadFull(r, c.packetData[firstBlockLength:]) - if err != nil { - return nil, err - } - c.oracleCamouflage -= uint32(n) - - remainingCrypted := c.packetData[firstBlockLength:macStart] - c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) - - mac := c.packetData[macStart:] - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData[:macStart]) - c.macResult = c.mac.Sum(c.macResult[:0]) - if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { - return nil, cbcError("ssh: MAC failure") - } - } - - return c.packetData[prefixLen:paddingStart], nil -} - -func (c *cbcCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) - - // Length of encrypted portion of the packet (header, payload, padding). - // Enforce minimum padding and packet size. - encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) - // Enforce block size. - encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize - - length := encLength - 4 - paddingLength := int(length) - (1 + len(packet)) - - // Overall buffer contains: header, payload, padding, mac. - // Space for the MAC is reserved in the capacity but not the slice length. - bufferSize := encLength + c.macSize - if uint32(cap(c.packetData)) < bufferSize { - c.packetData = make([]byte, encLength, bufferSize) - } else { - c.packetData = c.packetData[:encLength] - } - - p := c.packetData - - // Packet header. - binary.BigEndian.PutUint32(p, length) - p = p[4:] - p[0] = byte(paddingLength) - - // Payload. - p = p[1:] - copy(p, packet) - - // Padding. - p = p[len(packet):] - if _, err := io.ReadFull(rand, p); err != nil { - return err - } - - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData) - // The MAC is now appended into the capacity reserved for it earlier. - c.packetData = c.mac.Sum(c.packetData) - } - - c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) - - if _, err := w.Write(c.packetData); err != nil { - return err - } - - return nil -} - -const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" - -// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com -// AEAD, which is described here: -// -// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 -// -// the methods here also implement padding, which RFC 4253 Section 6 -// also requires of stream ciphers. 
-type chacha20Poly1305Cipher struct { - lengthKey [32]byte - contentKey [32]byte - buf []byte -} - -func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - if len(key) != 64 { - panic(len(key)) - } - - c := &chacha20Poly1305Cipher{ - buf: make([]byte, 256), - } - - copy(c.contentKey[:], key[:32]) - copy(c.lengthKey[:], key[32:]) - return c, nil -} - -func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - nonce := make([]byte, 12) - binary.BigEndian.PutUint32(nonce[8:], seqNum) - s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) - if err != nil { - return nil, err - } - var polyKey, discardBuf [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes - - encryptedLength := c.buf[:4] - if _, err := io.ReadFull(r, encryptedLength); err != nil { - return nil, err - } - - var lenBytes [4]byte - ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) - if err != nil { - return nil, err - } - ls.XORKeyStream(lenBytes[:], encryptedLength) - - length := binary.BigEndian.Uint32(lenBytes[:]) - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - contentEnd := 4 + length - packetEnd := contentEnd + poly1305.TagSize - if uint32(cap(c.buf)) < packetEnd { - c.buf = make([]byte, packetEnd) - copy(c.buf[:], encryptedLength) - } else { - c.buf = c.buf[:packetEnd] - } - - if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil { - return nil, err - } - - var mac [poly1305.TagSize]byte - copy(mac[:], c.buf[contentEnd:packetEnd]) - if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) { - return nil, errors.New("ssh: MAC failure") - } - - plain := c.buf[4:contentEnd] - s.XORKeyStream(plain, plain) - - if len(plain) == 0 { - return nil, errors.New("ssh: empty packet") - } - - padding := plain[0] - if padding < 4 { - // padding is a byte, so it automatically satisfies - // the maximum size, which is 255. - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding)+1 >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - - plain = plain[1 : len(plain)-int(padding)] - - return plain, nil -} - -func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { - nonce := make([]byte, 12) - binary.BigEndian.PutUint32(nonce[8:], seqNum) - s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) - if err != nil { - return err - } - var polyKey, discardBuf [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes - - // There is no blocksize, so fall back to multiple of 8 byte - // padding, as described in RFC 4253, Sec 6. - const packetSizeMultiple = 8 - - padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple - if padding < 4 { - padding += packetSizeMultiple - } - - // size (4 bytes), padding (1), payload, padding, tag. 
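The read path above shows the chacha20-poly1305@openssh.com key schedule: the packet sequence number forms the nonce, the first 32 bytes of ChaCha20 keystream (block counter 0) become a one-time Poly1305 key, and the next 32 bytes are discarded so payload encryption starts at block counter 1. A hedged sketch of that derivation on its own, using the real golang.org/x/crypto/chacha20 API:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/chacha20"
)

// polyKeyForSeq derives the per-packet Poly1305 key the way the cipher above
// does: key ChaCha20 with the content key and a 12-byte nonce whose last 4
// bytes are the sequence number, take the first 32 keystream bytes as the
// Poly1305 key, and discard the next 32 so encryption starts at block 1.
func polyKeyForSeq(contentKey []byte, seqNum uint32) ([32]byte, error) {
	var polyKey, discard [32]byte
	nonce := make([]byte, 12)
	binary.BigEndian.PutUint32(nonce[8:], seqNum)
	s, err := chacha20.NewUnauthenticatedCipher(contentKey, nonce)
	if err != nil {
		return polyKey, err
	}
	s.XORKeyStream(polyKey[:], polyKey[:])
	s.XORKeyStream(discard[:], discard[:]) // skip to the next 64-byte block
	return polyKey, nil
}

func main() {
	key := make([]byte, 32) // demo key; real keys come from the SSH KDF
	k, err := polyKeyForSeq(key, 42)
	fmt.Println(k[:4], err)
}
```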
- totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize - if cap(c.buf) < totalLength { - c.buf = make([]byte, totalLength) - } else { - c.buf = c.buf[:totalLength] - } - - binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) - ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) - if err != nil { - return err - } - ls.XORKeyStream(c.buf, c.buf[:4]) - c.buf[4] = byte(padding) - copy(c.buf[5:], payload) - packetEnd := 5 + len(payload) + padding - if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil { - return err - } - - s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd]) - - var mac [poly1305.TagSize]byte - poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey) - - copy(c.buf[packetEnd:], mac[:]) - - if _, err := w.Write(c.buf); err != nil { - return err - } - return nil -} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go deleted file mode 100644 index bdc356cb..00000000 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "net" - "os" - "sync" - "time" -) - -// Client implements a traditional SSH client that supports shells, -// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. -type Client struct { - Conn - - handleForwardsOnce sync.Once // guards calling (*Client).handleForwards - - forwards forwardList // forwarded tcpip connections from the remote side - mu sync.Mutex - channelHandlers map[string]chan NewChannel -} - -// HandleChannelOpen returns a channel on which NewChannel requests -// for the given type are sent. If the type already is being handled, -// nil is returned. The channel is closed when the connection is closed. -func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { - c.mu.Lock() - defer c.mu.Unlock() - if c.channelHandlers == nil { - // The SSH channel has been closed. - c := make(chan NewChannel) - close(c) - return c - } - - ch := c.channelHandlers[channelType] - if ch != nil { - return nil - } - - ch = make(chan NewChannel, chanSize) - c.channelHandlers[channelType] = ch - return ch -} - -// NewClient creates a Client on top of the given connection. -func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { - conn := &Client{ - Conn: c, - channelHandlers: make(map[string]chan NewChannel, 1), - } - - go conn.handleGlobalRequests(reqs) - go conn.handleChannelOpens(chans) - go func() { - conn.Wait() - conn.forwards.closeAll() - }() - return conn -} - -// NewClientConn establishes an authenticated SSH connection using c -// as the underlying transport. The Request and NewChannel channels -// must be serviced or the connection will hang. 
-func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.HostKeyCallback == nil { - c.Close() - return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") - } - - conn := &connection{ - sshConn: sshConn{conn: c, user: fullConf.User}, - } - - if err := conn.clientHandshake(addr, &fullConf); err != nil { - c.Close() - return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) - } - conn.mux = newMux(conn.transport) - return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil -} - -// clientHandshake performs the client side key exchange. See RFC 4253 Section -// 7. -func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { - if config.ClientVersion != "" { - c.clientVersion = []byte(config.ClientVersion) - } else { - c.clientVersion = []byte(packageVersion) - } - var err error - c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) - if err != nil { - return err - } - - c.transport = newClientTransport( - newTransport(c.sshConn.conn, config.Rand, true /* is client */), - c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) - if err := c.transport.waitSession(); err != nil { - return err - } - - c.sessionID = c.transport.getSessionID() - return c.clientAuthenticate(config) -} - -// verifyHostKeySignature verifies the host key obtained in the key exchange. -// algo is the negotiated algorithm, and may be a certificate type. -func verifyHostKeySignature(hostKey PublicKey, algo string, result *kexResult) error { - sig, rest, ok := parseSignatureBody(result.Signature) - if len(rest) > 0 || !ok { - return errors.New("ssh: signature parse error") - } - - if a := underlyingAlgo(algo); sig.Format != a { - return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, a) - } - - return hostKey.Verify(result.H, sig) -} - -// NewSession opens a new Session for this client. (A session is a remote -// execution of a program.) -func (c *Client) NewSession() (*Session, error) { - ch, in, err := c.OpenChannel("session", nil) - if err != nil { - return nil, err - } - return newSession(ch, in) -} - -func (c *Client) handleGlobalRequests(incoming <-chan *Request) { - for r := range incoming { - // This handles keepalive messages and matches - // the behaviour of OpenSSH. - r.Reply(false, nil) - } -} - -// handleChannelOpens channel open messages from the remote side. -func (c *Client) handleChannelOpens(in <-chan NewChannel) { - for ch := range in { - c.mu.Lock() - handler := c.channelHandlers[ch.ChannelType()] - c.mu.Unlock() - - if handler != nil { - handler <- ch - } else { - ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) - } - } - - c.mu.Lock() - for _, ch := range c.channelHandlers { - close(ch) - } - c.channelHandlers = nil - c.mu.Unlock() -} - -// Dial starts a client connection to the given SSH server. It is a -// convenience function that connects to the given network address, -// initiates the SSH handshake, and then sets up a Client. For access -// to incoming channels and requests, use net.Dial with NewClientConn -// instead. 
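As a usage sketch of the client API being removed here (host, user, and password are hypothetical, and `Session.Output` is part of this package's session API):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Hypothetical host and credentials, for illustration only.
	config := &ssh.ClientConfig{
		User: "alice",
		Auth: []ssh.AuthMethod{ssh.Password("secret")},
		// Never use InsecureIgnoreHostKey outside of tests; pin the
		// expected key with FixedHostKey or a known_hosts check instead.
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}

	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	session, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	out, err := session.Output("uname -a") // run a single remote command
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}
```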
-func Dial(network, addr string, config *ClientConfig) (*Client, error) { - conn, err := net.DialTimeout(network, addr, config.Timeout) - if err != nil { - return nil, err - } - c, chans, reqs, err := NewClientConn(conn, addr, config) - if err != nil { - return nil, err - } - return NewClient(c, chans, reqs), nil -} - -// HostKeyCallback is the function type used for verifying server -// keys. A HostKeyCallback must return nil if the host key is OK, or -// an error to reject it. It receives the hostname as passed to Dial -// or NewClientConn. The remote address is the RemoteAddr of the -// net.Conn underlying the SSH connection. -type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error - -// BannerCallback is the function type used to handle the banner sent by -// the server. A BannerCallback receives the message sent by the remote server. -type BannerCallback func(message string) error - -// A ClientConfig structure is used to configure a Client. It must not be -// modified after having been passed to an SSH function. -type ClientConfig struct { - // Config contains configuration that is shared between clients and - // servers. - Config - - // User contains the username to authenticate as. - User string - - // Auth contains possible authentication methods to use with the - // server. Only the first instance of a particular RFC 4252 method will - // be used during authentication. - Auth []AuthMethod - - // HostKeyCallback is called during the cryptographic - // handshake to validate the server's host key. The client - // configuration must supply this callback for the connection - // to succeed. The functions InsecureIgnoreHostKey or - // FixedHostKey can be used for simplistic host key checks. - HostKeyCallback HostKeyCallback - - // BannerCallback is called during the SSH handshake to display the - // server's banner message. The client configuration can supply this - // callback to handle the message as it sees fit. The function - // BannerDisplayStderr can be used for simplistic display on Stderr. - BannerCallback BannerCallback - - // ClientVersion contains the version identification string that will - // be used for the connection. If empty, a reasonable default is used. - ClientVersion string - - // HostKeyAlgorithms lists the public key algorithms that the client will - // accept from the server for host key authentication, in order of - // preference. If empty, a reasonable default is used. Any - // string returned from a PublicKey.Type method may be used, or - // any of the CertAlgo and KeyAlgo constants. - HostKeyAlgorithms []string - - // Timeout is the maximum amount of time to wait for the TCP connection - // to be established. - // - // A Timeout of zero means no timeout. - Timeout time.Duration -} - -// InsecureIgnoreHostKey returns a function that can be used for -// ClientConfig.HostKeyCallback to accept any host key. It should -// not be used for production code. -func InsecureIgnoreHostKey() HostKeyCallback { - return func(hostname string, remote net.Addr, key PublicKey) error { - return nil - } -} - -type fixedHostKey struct { - key PublicKey -} - -func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error { - if f.key == nil { - return fmt.Errorf("ssh: required host key was nil") - } - if !bytes.Equal(key.Marshal(), f.key.Marshal()) { - return fmt.Errorf("ssh: host key mismatch") - } - return nil -} - -// FixedHostKey returns a function for use in -// ClientConfig.HostKeyCallback to accept only a specific host key.
-func FixedHostKey(key PublicKey) HostKeyCallback { - hk := &fixedHostKey{key} - return hk.check -} - -// BannerDisplayStderr returns a function that can be used for -// ClientConfig.BannerCallback to display banners on os.Stderr. -func BannerDisplayStderr() BannerCallback { - return func(banner string) error { - _, err := os.Stderr.WriteString(banner) - - return err - } -} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go deleted file mode 100644 index 409b5ea1..00000000 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ /dev/null @@ -1,725 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "strings" -) - -type authResult int - -const ( - authFailure authResult = iota - authPartialSuccess - authSuccess -) - -// clientAuthenticate authenticates with the remote server. See RFC 4252. -func (c *connection) clientAuthenticate(config *ClientConfig) error { - // initiate user auth session - if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { - return err - } - packet, err := c.transport.readPacket() - if err != nil { - return err - } - // The server may choose to send a SSH_MSG_EXT_INFO at this point (if we - // advertised willingness to receive one, which we always do) or not. See - // RFC 8308, Section 2.4. - extensions := make(map[string][]byte) - if len(packet) > 0 && packet[0] == msgExtInfo { - var extInfo extInfoMsg - if err := Unmarshal(packet, &extInfo); err != nil { - return err - } - payload := extInfo.Payload - for i := uint32(0); i < extInfo.NumExtensions; i++ { - name, rest, ok := parseString(payload) - if !ok { - return parseError(msgExtInfo) - } - value, rest, ok := parseString(rest) - if !ok { - return parseError(msgExtInfo) - } - extensions[string(name)] = value - payload = rest - } - packet, err = c.transport.readPacket() - if err != nil { - return err - } - } - var serviceAccept serviceAcceptMsg - if err := Unmarshal(packet, &serviceAccept); err != nil { - return err - } - - // during the authentication phase the client first attempts the "none" method - // then any untried methods suggested by the server. - var tried []string - var lastMethods []string - - sessionID := c.transport.getSessionID() - for auth := AuthMethod(new(noneAuth)); auth != nil; { - ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions) - if err != nil { - return err - } - if ok == authSuccess { - // success - return nil - } else if ok == authFailure { - if m := auth.method(); !contains(tried, m) { - tried = append(tried, m) - } - } - if methods == nil { - methods = lastMethods - } - lastMethods = methods - - auth = nil - - findNext: - for _, a := range config.Auth { - candidateMethod := a.method() - if contains(tried, candidateMethod) { - continue - } - for _, meth := range methods { - if meth == candidateMethod { - auth = a - break findNext - } - } - } - } - return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) -} - -func contains(list []string, e string) bool { - for _, s := range list { - if s == e { - return true - } - } - return false -} - -// An AuthMethod represents an instance of an RFC 4252 authentication method. -type AuthMethod interface { - // auth authenticates user over transport t. 
- // Returns true if authentication is successful. - // If authentication is not successful, a []string of alternative - // method names is returned. If the slice is nil, it will be ignored - // and the previous set of possible methods will be reused. - auth(session []byte, user string, p packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) - - // method returns the RFC 4252 method name. - method() string -} - -// "none" authentication, RFC 4252 section 5.2. -type noneAuth int - -func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { - if err := c.writePacket(Marshal(&userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: "none", - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (n *noneAuth) method() string { - return "none" -} - -// passwordCallback is an AuthMethod that fetches the password through -// a function call, e.g. by prompting the user. -type passwordCallback func() (password string, err error) - -func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { - type passwordAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - Reply bool - Password string - } - - pw, err := cb() - // REVIEW NOTE: is there a need to support skipping a password attempt? - // The program may only find out that the user doesn't have a password - // when prompting. - if err != nil { - return authFailure, nil, err - } - - if err := c.writePacket(Marshal(&passwordAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - Reply: false, - Password: pw, - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (cb passwordCallback) method() string { - return "password" -} - -// Password returns an AuthMethod using the given password. -func Password(secret string) AuthMethod { - return passwordCallback(func() (string, error) { return secret, nil }) -} - -// PasswordCallback returns an AuthMethod that uses a callback for -// fetching a password. -func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { - return passwordCallback(prompt) -} - -type publickeyAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - // HasSig indicates to the receiver packet that the auth request is signed and - // should be used for authentication of the request. - HasSig bool - Algoname string - PubKey []byte - // Sig is tagged with "rest" so Marshal will exclude it during - // validateKey - Sig []byte `ssh:"rest"` -} - -// publicKeyCallback is an AuthMethod that uses a set of key -// pairs for authentication. -type publicKeyCallback func() ([]Signer, error) - -func (cb publicKeyCallback) method() string { - return "publickey" -} - -func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as AlgorithmSigner, algo string) { - keyFormat := signer.PublicKey().Type() - - // Like in sendKexInit, if the public key implements AlgorithmSigner we - // assume it supports all algorithms, otherwise only the key format one. - as, ok := signer.(AlgorithmSigner) - if !ok { - return algorithmSignerWrapper{signer}, keyFormat - } - - extPayload, ok := extensions["server-sig-algs"] - if !ok { - // If there is no "server-sig-algs" extension, fall back to the key - // format algorithm. 
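The fallback described above is the interesting part of the server-sig-algs negotiation: the client intersects its preferred algorithms for the key format with what the server advertises, and if there is no overlap it still tries the key format itself, since some servers under-report what they support. A standalone sketch of that selection (names and order mirror `algorithmsForKeyFormat` for plain ssh-rsa keys):

```go
package main

import (
	"fmt"
	"strings"
)

// preferredAlgosForKey mirrors algorithmsForKeyFormat for plain "ssh-rsa"
// keys: prefer the RFC 8332 SHA-2 algorithms over SHA-1.
func preferredAlgosForKey(keyFormat string) []string {
	if keyFormat == "ssh-rsa" {
		return []string{"rsa-sha2-256", "rsa-sha2-512", "ssh-rsa"}
	}
	return []string{keyFormat}
}

// pickAlgo returns the first client-preferred algorithm the server advertises
// in its server-sig-algs extension, falling back to the key format itself
// when there is no overlap.
func pickAlgo(keyFormat, serverSigAlgs string) string {
	server := strings.Split(serverSigAlgs, ",")
	for _, c := range preferredAlgosForKey(keyFormat) {
		for _, s := range server {
			if c == s {
				return c
			}
		}
	}
	return keyFormat
}

func main() {
	fmt.Println(pickAlgo("ssh-rsa", "ssh-ed25519,rsa-sha2-512,rsa-sha2-256"))
	// rsa-sha2-256: first mutual algorithm in client preference order
}
```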
- return as, keyFormat - } - - // The server-sig-algs extension only carries underlying signature - // algorithm, but we are trying to select a protocol-level public key - // algorithm, which might be a certificate type. Extend the list of server - // supported algorithms to include the corresponding certificate algorithms. - serverAlgos := strings.Split(string(extPayload), ",") - for _, algo := range serverAlgos { - if certAlgo, ok := certificateAlgo(algo); ok { - serverAlgos = append(serverAlgos, certAlgo) - } - } - - keyAlgos := algorithmsForKeyFormat(keyFormat) - algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos) - if err != nil { - // If there is no overlap, try the key anyway with the key format - // algorithm, to support servers that fail to list all supported - // algorithms. - return as, keyFormat - } - return as, algo -} - -func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) { - // Authentication is performed by sending an enquiry to test if a key is - // acceptable to the remote. If the key is acceptable, the client will - // attempt to authenticate with the valid key. If not the client will repeat - // the process with the remaining keys. - - signers, err := cb() - if err != nil { - return authFailure, nil, err - } - var methods []string - for _, signer := range signers { - pub := signer.PublicKey() - as, algo := pickSignatureAlgorithm(signer, extensions) - - ok, err := validateKey(pub, algo, user, c) - if err != nil { - return authFailure, nil, err - } - if !ok { - continue - } - - pubKey := pub.Marshal() - data := buildDataSignedForAuth(session, userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - }, algo, pubKey) - sign, err := as.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) - if err != nil { - return authFailure, nil, err - } - - // manually wrap the serialized signature in a string - s := Marshal(sign) - sig := make([]byte, stringLength(len(s))) - marshalString(sig, s) - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - HasSig: true, - Algoname: algo, - PubKey: pubKey, - Sig: sig, - } - p := Marshal(&msg) - if err := c.writePacket(p); err != nil { - return authFailure, nil, err - } - var success authResult - success, methods, err = handleAuthResponse(c) - if err != nil { - return authFailure, nil, err - } - - // If authentication succeeds or the list of available methods does not - // contain the "publickey" method, do not attempt to authenticate with any - // other keys. According to RFC 4252 Section 7, the latter can occur when - // additional authentication methods are required. - if success == authSuccess || !containsMethod(methods, cb.method()) { - return success, methods, err - } - } - - return authFailure, methods, nil -} - -func containsMethod(methods []string, method string) bool { - for _, m := range methods { - if m == method { - return true - } - } - - return false -} - -// validateKey validates the key provided is acceptable to the server. 
-func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, error) { - pubKey := key.Marshal() - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: "publickey", - HasSig: false, - Algoname: algo, - PubKey: pubKey, - } - if err := c.writePacket(Marshal(&msg)); err != nil { - return false, err - } - - return confirmKeyAck(key, algo, c) -} - -func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) { - pubKey := key.Marshal() - - for { - packet, err := c.readPacket() - if err != nil { - return false, err - } - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return false, err - } - case msgUserAuthPubKeyOk: - var msg userAuthPubKeyOkMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, err - } - if msg.Algo != algo || !bytes.Equal(msg.PubKey, pubKey) { - return false, nil - } - return true, nil - case msgUserAuthFailure: - return false, nil - default: - return false, unexpectedMessageError(msgUserAuthPubKeyOk, packet[0]) - } - } -} - -// PublicKeys returns an AuthMethod that uses the given key -// pairs. -func PublicKeys(signers ...Signer) AuthMethod { - return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) -} - -// PublicKeysCallback returns an AuthMethod that runs the given -// function to obtain a list of key pairs. -func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { - return publicKeyCallback(getSigners) -} - -// handleAuthResponse returns whether the preceding authentication request succeeded -// along with a list of remaining authentication methods to try next and -// an error if an unexpected response was received. -func handleAuthResponse(c packetConn) (authResult, []string, error) { - gotMsgExtInfo := false - for { - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return authFailure, nil, err - } - case msgExtInfo: - // Ignore post-authentication RFC 8308 extensions, once. - if gotMsgExtInfo { - return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - gotMsgExtInfo = true - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthSuccess: - return authSuccess, nil, nil - default: - return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -func handleBannerResponse(c packetConn, packet []byte) error { - var msg userAuthBannerMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - transport, ok := c.(*handshakeTransport) - if !ok { - return nil - } - - if transport.bannerCallback != nil { - return transport.bannerCallback(msg.Message) - } - - return nil -} - -// KeyboardInteractiveChallenge should print questions, optionally -// disabling echoing (e.g. for passwords), and return all the answers. -// Challenge may be called multiple times in a single session. After -// successful authentication, the server may send a challenge with no -// questions, for which the name and instruction messages should be -// printed. RFC 4256 section 3.3 details how the UI should behave for -// both CLI and GUI environments. 
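A minimal implementation of the challenge callback described above might look like the following sketch; it reads answers from stdin and, for brevity, does not disable echo for prompts where `echos[i]` is false (a real client would, e.g. with golang.org/x/term):

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"os"

	"golang.org/x/crypto/ssh"
)

// stdinChallenge is a minimal KeyboardInteractiveChallenge: it prints the
// server's name, instruction, and prompts, then reads one answer per prompt.
func stdinChallenge(name, instruction string, questions []string, echos []bool) ([]string, error) {
	if name != "" {
		fmt.Println(name)
	}
	if instruction != "" {
		fmt.Println(instruction)
	}
	in := bufio.NewScanner(os.Stdin)
	answers := make([]string, 0, len(questions))
	for _, q := range questions {
		fmt.Print(q)
		if !in.Scan() {
			if err := in.Err(); err != nil {
				return nil, err
			}
			return nil, io.ErrUnexpectedEOF
		}
		answers = append(answers, in.Text())
	}
	return answers, nil
}

func main() {
	auth := ssh.KeyboardInteractive(stdinChallenge)
	_ = auth // pass this in ClientConfig.Auth
}
```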
-type KeyboardInteractiveChallenge func(name, instruction string, questions []string, echos []bool) (answers []string, err error) - -// KeyboardInteractive returns an AuthMethod using a prompt/response -// sequence controlled by the server. -func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { - return challenge -} - -func (cb KeyboardInteractiveChallenge) method() string { - return "keyboard-interactive" -} - -func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { - type initiateMsg struct { - User string `sshtype:"50"` - Service string - Method string - Language string - Submethods string - } - - if err := c.writePacket(Marshal(&initiateMsg{ - User: user, - Service: serviceSSH, - Method: "keyboard-interactive", - })); err != nil { - return authFailure, nil, err - } - - gotMsgExtInfo := false - for { - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - - // like handleAuthResponse, but with less options. - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return authFailure, nil, err - } - continue - case msgExtInfo: - // Ignore post-authentication RFC 8308 extensions, once. - if gotMsgExtInfo { - return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) - } - gotMsgExtInfo = true - continue - case msgUserAuthInfoRequest: - // OK - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthSuccess: - return authSuccess, nil, nil - default: - return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) - } - - var msg userAuthInfoRequestMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - - // Manually unpack the prompt/echo pairs. 
- rest := msg.Prompts - var prompts []string - var echos []bool - for i := 0; i < int(msg.NumPrompts); i++ { - prompt, r, ok := parseString(rest) - if !ok || len(r) == 0 { - return authFailure, nil, errors.New("ssh: prompt format error") - } - prompts = append(prompts, string(prompt)) - echos = append(echos, r[0] != 0) - rest = r[1:] - } - - if len(rest) != 0 { - return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") - } - - answers, err := cb(msg.Name, msg.Instruction, prompts, echos) - if err != nil { - return authFailure, nil, err - } - - if len(answers) != len(prompts) { - return authFailure, nil, fmt.Errorf("ssh: incorrect number of answers from keyboard-interactive callback %d (expected %d)", len(answers), len(prompts)) - } - responseLength := 1 + 4 - for _, a := range answers { - responseLength += stringLength(len(a)) - } - serialized := make([]byte, responseLength) - p := serialized - p[0] = msgUserAuthInfoResponse - p = p[1:] - p = marshalUint32(p, uint32(len(answers))) - for _, a := range answers { - p = marshalString(p, []byte(a)) - } - - if err := c.writePacket(serialized); err != nil { - return authFailure, nil, err - } - } -} - -type retryableAuthMethod struct { - authMethod AuthMethod - maxTries int -} - -func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (ok authResult, methods []string, err error) { - for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { - ok, methods, err = r.authMethod.auth(session, user, c, rand, extensions) - if ok != authFailure || err != nil { // either success, partial success or error terminate - return ok, methods, err - } - } - return ok, methods, err -} - -func (r *retryableAuthMethod) method() string { - return r.authMethod.method() -} - -// RetryableAuthMethod is a decorator for other auth methods enabling them to -// be retried up to maxTries before considering that AuthMethod itself failed. -// If maxTries is <= 0, will retry indefinitely -// -// This is useful for interactive clients using challenge/response type -// authentication (e.g. Keyboard-Interactive, Password, etc) where the user -// could mistype their response resulting in the server issuing a -// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 -// [keyboard-interactive]); Without this decorator, the non-retryable -// AuthMethod would be removed from future consideration, and never tried again -// (and so the user would never be able to retry their entry). -func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { - return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} -} - -// GSSAPIWithMICAuthMethod is an AuthMethod with "gssapi-with-mic" authentication. -// See RFC 4462 section 3 -// gssAPIClient is implementation of the GSSAPIClient interface, see the definition of the interface for details. -// target is the server host you want to log in to. 
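A short usage sketch of the RetryableAuthMethod decorator defined below, wrapping a keyboard-interactive method so a mistyped response does not permanently disqualify it (the challenge body is a stub standing in for real prompting):

```go
package main

import "golang.org/x/crypto/ssh"

func main() {
	// Let the user mistype a keyboard-interactive response up to three
	// times before the method is considered to have failed.
	challenge := func(name, instruction string, questions []string, echos []bool) ([]string, error) {
		answers := make([]string, len(questions)) // prompt the user here
		return answers, nil
	}
	auth := ssh.RetryableAuthMethod(ssh.KeyboardInteractive(challenge), 3)
	_ = auth // include in ClientConfig.Auth
}
```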
-func GSSAPIWithMICAuthMethod(gssAPIClient GSSAPIClient, target string) AuthMethod { - if gssAPIClient == nil { - panic("ssh: gssapi-with-mic requires a non-nil GSSAPIClient") - } - return &gssAPIWithMICCallback{gssAPIClient: gssAPIClient, target: target} -} - -type gssAPIWithMICCallback struct { - gssAPIClient GSSAPIClient - target string -} - -func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { - m := &userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: g.method(), - } - // The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST. - // See RFC 4462 section 3.2. - m.Payload = appendU32(m.Payload, 1) - m.Payload = appendString(m.Payload, string(krb5OID)) - if err := c.writePacket(Marshal(m)); err != nil { - return authFailure, nil, err - } - // The server responds to the SSH_MSG_USERAUTH_REQUEST with either an - // SSH_MSG_USERAUTH_FAILURE if none of the mechanisms are supported or - // with an SSH_MSG_USERAUTH_GSSAPI_RESPONSE. - // See RFC 4462 section 3.3. - // OpenSSH supports only the Kerberos V5 mechanism for GSS-API - // authentication, so the selected mechanism is not validated here. - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - userAuthGSSAPIResp := &userAuthGSSAPIResponse{} - if err := Unmarshal(packet, userAuthGSSAPIResp); err != nil { - return authFailure, nil, err - } - // Start the token exchange loop. - // See RFC 4462 section 3.4. - var token []byte - defer g.gssAPIClient.DeleteSecContext() - for { - // Initiates the establishment of a security context between the application and a remote peer. - nextToken, needContinue, err := g.gssAPIClient.InitSecContext("host@"+g.target, token, false) - if err != nil { - return authFailure, nil, err - } - if len(nextToken) > 0 { - if err := c.writePacket(Marshal(&userAuthGSSAPIToken{ - Token: nextToken, - })); err != nil { - return authFailure, nil, err - } - } - if !needContinue { - break - } - packet, err = c.readPacket() - if err != nil { - return authFailure, nil, err - } - switch packet[0] { - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthGSSAPIError: - userAuthGSSAPIErrorResp := &userAuthGSSAPIError{} - if err := Unmarshal(packet, userAuthGSSAPIErrorResp); err != nil { - return authFailure, nil, err - } - return authFailure, nil, fmt.Errorf("GSS-API Error:\n"+ - "Major Status: %d\n"+ - "Minor Status: %d\n"+ - "Error Message: %s\n", userAuthGSSAPIErrorResp.MajorStatus, userAuthGSSAPIErrorResp.MinorStatus, - userAuthGSSAPIErrorResp.Message) - case msgUserAuthGSSAPIToken: - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return authFailure, nil, err - } - token = userAuthGSSAPITokenReq.Token - } - } - // Binding Encryption Keys. - // See RFC 4462 section 3.5.
- micField := buildMIC(string(session), user, "ssh-connection", "gssapi-with-mic") - micToken, err := g.gssAPIClient.GetMIC(micField) - if err != nil { - return authFailure, nil, err - } - if err := c.writePacket(Marshal(&userAuthGSSAPIMIC{ - MIC: micToken, - })); err != nil { - return authFailure, nil, err - } - return handleAuthResponse(c) -} - -func (g *gssAPIWithMICCallback) method() string { - return "gssapi-with-mic" -} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go deleted file mode 100644 index c7964275..00000000 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ /dev/null @@ -1,445 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/rand" - "fmt" - "io" - "math" - "strings" - "sync" - - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" -) - -// These are string constants in the SSH protocol. -const ( - compressionNone = "none" - serviceUserAuth = "ssh-userauth" - serviceSSH = "ssh-connection" -) - -// supportedCiphers lists ciphers we support but might not recommend. -var supportedCiphers = []string{ - "aes128-ctr", "aes192-ctr", "aes256-ctr", - "aes128-gcm@openssh.com", - chacha20Poly1305ID, - "arcfour256", "arcfour128", "arcfour", - aes128cbcID, - tripledescbcID, -} - -// preferredCiphers specifies the default preference for ciphers. -var preferredCiphers = []string{ - "aes128-gcm@openssh.com", - chacha20Poly1305ID, - "aes128-ctr", "aes192-ctr", "aes256-ctr", -} - -// supportedKexAlgos specifies the supported key-exchange algorithms in -// preference order. -var supportedKexAlgos = []string{ - kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, - // P384 and P521 are not constant-time yet, but since we don't - // reuse ephemeral keys, using them for ECDH should be OK. - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA256, kexAlgoDH14SHA1, kexAlgoDH1SHA1, -} - -// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden -// for the server half. -var serverForbiddenKexAlgos = map[string]struct{}{ - kexAlgoDHGEXSHA1: {}, // server half implementation is only minimal to satisfy the automated tests - kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests -} - -// preferredKexAlgos specifies the default preference for key-exchange algorithms -// in preference order. -var preferredKexAlgos = []string{ - kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA256, kexAlgoDH14SHA1, -} - -// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods -// of authenticating servers) in preference order. -var supportedHostKeyAlgos = []string{ - CertAlgoRSASHA512v01, CertAlgoRSASHA256v01, - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, - CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, - - KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSASHA512, KeyAlgoRSASHA256, - KeyAlgoRSA, KeyAlgoDSA, - - KeyAlgoED25519, -} - -// supportedMACs specifies a default set of MAC algorithms in preference order. -// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed -// because they have reached the end of their useful life. 
-var supportedMACs = []string{ - "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", -} - -var supportedCompressions = []string{compressionNone} - -// hashFuncs keeps the mapping of supported signature algorithms to their -// respective hashes needed for signing and verification. -var hashFuncs = map[string]crypto.Hash{ - KeyAlgoRSA: crypto.SHA1, - KeyAlgoRSASHA256: crypto.SHA256, - KeyAlgoRSASHA512: crypto.SHA512, - KeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - // KeyAlgoED25519 doesn't pre-hash. - KeyAlgoSKECDSA256: crypto.SHA256, - KeyAlgoSKED25519: crypto.SHA256, -} - -// algorithmsForKeyFormat returns the supported signature algorithms for a given -// public key format (PublicKey.Type), in order of preference. See RFC 8332, -// Section 2. See also the note in sendKexInit on backwards compatibility. -func algorithmsForKeyFormat(keyFormat string) []string { - switch keyFormat { - case KeyAlgoRSA: - return []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA} - case CertAlgoRSAv01: - return []string{CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, CertAlgoRSAv01} - default: - return []string{keyFormat} - } -} - -// supportedPubKeyAuthAlgos specifies the supported client public key -// authentication algorithms. Note that this doesn't include certificate types -// since those use the underlying algorithm. This list is sent to the client if -// it supports the server-sig-algs extension. Order is irrelevant. -var supportedPubKeyAuthAlgos = []string{ - KeyAlgoED25519, - KeyAlgoSKED25519, KeyAlgoSKECDSA256, - KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA, - KeyAlgoDSA, -} - -var supportedPubKeyAuthAlgosList = strings.Join(supportedPubKeyAuthAlgos, ",") - -// unexpectedMessageError results when the SSH message that we received didn't -// match what we wanted. -func unexpectedMessageError(expected, got uint8) error { - return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) -} - -// parseError results from a malformed SSH message. -func parseError(tag uint8) error { - return fmt.Errorf("ssh: parse error in message type %d", tag) -} - -func findCommon(what string, client []string, server []string) (common string, err error) { - for _, c := range client { - for _, s := range server { - if c == s { - return c, nil - } - } - } - return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) -} - -// directionAlgorithms records algorithm choices in one direction (either read or write) -type directionAlgorithms struct { - Cipher string - MAC string - Compression string -} - -// rekeyBytes returns a rekeying intervals in bytes. -func (a *directionAlgorithms) rekeyBytes() int64 { - // According to RFC 4344 block ciphers should rekey after - // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is - // 128. - switch a.Cipher { - case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID: - return 16 * (1 << 32) - - } - - // For others, stick with RFC 4253 recommendation to rekey after 1 Gb of data. 
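The two thresholds in `rekeyBytes` come from different RFCs, and the arithmetic is worth spelling out: RFC 4344 section 3.2 says to rekey a block cipher after 2^(L/4) blocks, so a 128-bit-block cipher like AES rekeys after 2^32 blocks of 16 bytes each, while RFC 4253 falls back to one gigabyte for everything else:

```go
package main

import "fmt"

func main() {
	// RFC 4344 section 3.2: rekey after 2^(L/4) blocks for an L-bit block
	// cipher. AES has 128-bit blocks, so 2^32 blocks of 16 bytes each:
	aesRekey := int64(16) * (1 << 32)
	fmt.Println(aesRekey) // 68719476736 bytes = 64 GiB

	// RFC 4253 fallback for other ciphers:
	fmt.Println(int64(1) << 30) // 1073741824 bytes = 1 GiB
}
```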
- return 1 << 30 -} - -var aeadCiphers = map[string]bool{ - gcmCipherID: true, - chacha20Poly1305ID: true, -} - -type algorithms struct { - kex string - hostKey string - w directionAlgorithms - r directionAlgorithms -} - -func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { - result := &algorithms{} - - result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) - if err != nil { - return - } - - result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) - if err != nil { - return - } - - stoc, ctos := &result.w, &result.r - if isClient { - ctos, stoc = stoc, ctos - } - - ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) - if err != nil { - return - } - - stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) - if err != nil { - return - } - - if !aeadCiphers[ctos.Cipher] { - ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) - if err != nil { - return - } - } - - if !aeadCiphers[stoc.Cipher] { - stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) - if err != nil { - return - } - } - - ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) - if err != nil { - return - } - - stoc.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) - if err != nil { - return - } - - return result, nil -} - -// If rekeythreshold is too small, we can't make any progress sending -// stuff. -const minRekeyThreshold uint64 = 256 - -// Config contains configuration data common to both ServerConfig and -// ClientConfig. -type Config struct { - // Rand provides the source of entropy for cryptographic - // primitives. If Rand is nil, the cryptographic random reader - // in package crypto/rand will be used. - Rand io.Reader - - // The maximum number of bytes sent or received after which a - // new key is negotiated. It must be at least 256. If - // unspecified, a size suitable for the chosen cipher is used. - RekeyThreshold uint64 - - // The allowed key exchanges algorithms. If unspecified then a - // default set of algorithms is used. - KeyExchanges []string - - // The allowed cipher algorithms. If unspecified then a sensible - // default is used. - Ciphers []string - - // The allowed MAC algorithms. If unspecified then a sensible default - // is used. - MACs []string -} - -// SetDefaults sets sensible values for unset fields in config. This is -// exported for testing: Configs passed to SSH functions are copied and have -// default values set automatically. 
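Because `SetDefaults` silently drops cipher names it has no `cipherModes` entry for, restricting the negotiated algorithms is done by populating the embedded Config lists; a typo narrows the list rather than failing loudly. A hedged configuration sketch (user is hypothetical; the algorithm names are the ones listed in this file):

```go
package main

import "golang.org/x/crypto/ssh"

func main() {
	// Restrict the connection to AEAD ciphers, SHA-2 MACs, and
	// curve25519 key exchange.
	config := &ssh.ClientConfig{
		User:            "alice", // hypothetical
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
		Config: ssh.Config{
			Ciphers:      []string{"aes128-gcm@openssh.com", "chacha20-poly1305@openssh.com"},
			MACs:         []string{"hmac-sha2-256-etm@openssh.com", "hmac-sha2-256"},
			KeyExchanges: []string{"curve25519-sha256", "curve25519-sha256@libssh.org"},
		},
	}
	_ = config
}
```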
-func (c *Config) SetDefaults() { - if c.Rand == nil { - c.Rand = rand.Reader - } - if c.Ciphers == nil { - c.Ciphers = preferredCiphers - } - var ciphers []string - for _, c := range c.Ciphers { - if cipherModes[c] != nil { - // reject the cipher if we have no cipherModes definition - ciphers = append(ciphers, c) - } - } - c.Ciphers = ciphers - - if c.KeyExchanges == nil { - c.KeyExchanges = preferredKexAlgos - } - - if c.MACs == nil { - c.MACs = supportedMACs - } - - if c.RekeyThreshold == 0 { - // cipher specific default - } else if c.RekeyThreshold < minRekeyThreshold { - c.RekeyThreshold = minRekeyThreshold - } else if c.RekeyThreshold >= math.MaxInt64 { - // Avoid weirdness if somebody uses -1 as a threshold. - c.RekeyThreshold = math.MaxInt64 - } -} - -// buildDataSignedForAuth returns the data that is signed in order to prove -// possession of a private key. See RFC 4252, section 7. algo is the advertised -// algorithm, and may be a certificate type. -func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo string, pubKey []byte) []byte { - data := struct { - Session []byte - Type byte - User string - Service string - Method string - Sign bool - Algo string - PubKey []byte - }{ - sessionID, - msgUserAuthRequest, - req.User, - req.Service, - req.Method, - true, - algo, - pubKey, - } - return Marshal(data) -} - -func appendU16(buf []byte, n uint16) []byte { - return append(buf, byte(n>>8), byte(n)) -} - -func appendU32(buf []byte, n uint32) []byte { - return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendU64(buf []byte, n uint64) []byte { - return append(buf, - byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), - byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendInt(buf []byte, n int) []byte { - return appendU32(buf, uint32(n)) -} - -func appendString(buf []byte, s string) []byte { - buf = appendU32(buf, uint32(len(s))) - buf = append(buf, s...) - return buf -} - -func appendBool(buf []byte, b bool) []byte { - if b { - return append(buf, 1) - } - return append(buf, 0) -} - -// newCond is a helper to hide the fact that there is no usable zero -// value for sync.Cond. -func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } - -// window represents the buffer available to clients -// wishing to write to a channel. -type window struct { - *sync.Cond - win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 - writeWaiters int - closed bool -} - -// add adds win to the amount of window available -// for consumers. -func (w *window) add(win uint32) bool { - // a zero sized window adjust is a noop. - if win == 0 { - return true - } - w.L.Lock() - if w.win+win < win { - w.L.Unlock() - return false - } - w.win += win - // It is unusual that multiple goroutines would be attempting to reserve - // window space, but not guaranteed. Use broadcast to notify all waiters - // that additional window is available. - w.Broadcast() - w.L.Unlock() - return true -} - -// close sets the window to closed, so all reservations fail -// immediately. -func (w *window) close() { - w.L.Lock() - w.closed = true - w.Broadcast() - w.L.Unlock() -} - -// reserve reserves win from the available window capacity. -// If no capacity remains, reserve will block. reserve may -// return less than requested. 
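The append helpers above implement the RFC 4251 section 5 wire encoding, where an SSH "string" is a big-endian uint32 length followed by the raw bytes. A self-contained sketch of the same two helpers (re-implemented here since the package's versions are unexported):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// appendU32 and appendString mirror the unexported helpers above: SSH
// encodes a string as a big-endian uint32 length followed by the raw bytes
// (RFC 4251 section 5).
func appendU32(buf []byte, n uint32) []byte {
	return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
}

func appendString(buf []byte, s string) []byte {
	buf = appendU32(buf, uint32(len(s)))
	return append(buf, s...)
}

func main() {
	var payload []byte
	payload = appendString(payload, "ssh-userauth")
	fmt.Println(len(payload), binary.BigEndian.Uint32(payload[:4]))
	// 16 12: four length bytes plus the twelve bytes of "ssh-userauth"
}
```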
-func (w *window) reserve(win uint32) (uint32, error) { - var err error - w.L.Lock() - w.writeWaiters++ - w.Broadcast() - for w.win == 0 && !w.closed { - w.Wait() - } - w.writeWaiters-- - if w.win < win { - win = w.win - } - w.win -= win - if w.closed { - err = io.EOF - } - w.L.Unlock() - return win, err -} - -// waitWriterBlocked waits until some goroutine is blocked for further -// writes. It is used in tests only. -func (w *window) waitWriterBlocked() { - w.Cond.L.Lock() - for w.writeWaiters == 0 { - w.Cond.Wait() - } - w.Cond.L.Unlock() -} diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go deleted file mode 100644 index 35661a52..00000000 --- a/vendor/golang.org/x/crypto/ssh/connection.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "fmt" - "net" -) - -// OpenChannelError is returned if the other side rejects an -// OpenChannel request. -type OpenChannelError struct { - Reason RejectionReason - Message string -} - -func (e *OpenChannelError) Error() string { - return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message) -} - -// ConnMetadata holds metadata for the connection. -type ConnMetadata interface { - // User returns the user ID for this connection. - User() string - - // SessionID returns the session hash, also denoted by H. - SessionID() []byte - - // ClientVersion returns the client's version string as hashed - // into the session ID. - ClientVersion() []byte - - // ServerVersion returns the server's version string as hashed - // into the session ID. - ServerVersion() []byte - - // RemoteAddr returns the remote address for this connection. - RemoteAddr() net.Addr - - // LocalAddr returns the local address for this connection. - LocalAddr() net.Addr -} - -// Conn represents an SSH connection for both server and client roles. -// Conn is the basis for implementing an application layer, such -// as ClientConn, which implements the traditional shell access for -// clients. -type Conn interface { - ConnMetadata - - // SendRequest sends a global request, and returns the - // reply. If wantReply is true, it returns the response status - // and payload. See also RFC 4254, section 4. - SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) - - // OpenChannel tries to open an channel. If the request is - // rejected, it returns *OpenChannelError. On success it returns - // the SSH Channel and a Go channel for incoming, out-of-band - // requests. The Go channel must be serviced, or the - // connection will hang. - OpenChannel(name string, data []byte) (Channel, <-chan *Request, error) - - // Close closes the underlying network connection - Close() error - - // Wait blocks until the connection has shut down, and returns the - // error causing the shutdown. - Wait() error - - // TODO(hanwen): consider exposing: - // RequestKeyChange - // Disconnect -} - -// DiscardRequests consumes and rejects all requests from the -// passed-in channel. -func DiscardRequests(in <-chan *Request) { - for req := range in { - if req.WantReply { - req.Reply(false, nil) - } - } -} - -// A connection represents an incoming connection. -type connection struct { - transport *handshakeTransport - sshConn - - // The connection protocol. 
-	*mux
-}
-
-func (c *connection) Close() error {
-	return c.sshConn.conn.Close()
-}
-
-// sshConn provides net.Conn metadata, but disallows direct reads and
-// writes.
-type sshConn struct {
-	conn net.Conn
-
-	user          string
-	sessionID     []byte
-	clientVersion []byte
-	serverVersion []byte
-}
-
-func dup(src []byte) []byte {
-	dst := make([]byte, len(src))
-	copy(dst, src)
-	return dst
-}
-
-func (c *sshConn) User() string {
-	return c.user
-}
-
-func (c *sshConn) RemoteAddr() net.Addr {
-	return c.conn.RemoteAddr()
-}
-
-func (c *sshConn) Close() error {
-	return c.conn.Close()
-}
-
-func (c *sshConn) LocalAddr() net.Addr {
-	return c.conn.LocalAddr()
-}
-
-func (c *sshConn) SessionID() []byte {
-	return dup(c.sessionID)
-}
-
-func (c *sshConn) ClientVersion() []byte {
-	return dup(c.clientVersion)
-}
-
-func (c *sshConn) ServerVersion() []byte {
-	return dup(c.serverVersion)
-}
diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go
deleted file mode 100644
index f6bff60d..00000000
--- a/vendor/golang.org/x/crypto/ssh/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package ssh implements an SSH client and server.
-
-SSH is a transport security protocol, an authentication protocol and a
-family of application protocols. The most typical application-level
-protocol is a remote shell, and this is specifically implemented. However,
-the multiplexed nature of SSH is exposed to users who wish to support
-others.
-
-References:
-
-  [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
-  [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
-
-This package does not fall under the stability promise of the Go language itself,
-so its API may be changed when pressing needs arise.
-*/
-package ssh // import "golang.org/x/crypto/ssh"
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
deleted file mode 100644
index 07a1843e..00000000
--- a/vendor/golang.org/x/crypto/ssh/handshake.go
+++ /dev/null
@@ -1,735 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
-	"crypto/rand"
-	"errors"
-	"fmt"
-	"io"
-	"log"
-	"net"
-	"sync"
-)
-
-// debugHandshake, if set, prints messages sent and received. Key
-// exchange messages are printed as if DH were used, so the debug
-// messages are wrong when using ECDH.
-const debugHandshake = false
-
-// chanSize sets the amount of buffering for SSH connections. This is
-// primarily for testing: setting chanSize=0 uncovers deadlocks more
-// quickly.
-const chanSize = 16
-
-// keyingTransport is a packet-based transport that supports key
-// changes. It need not be thread-safe. It should pass through
-// msgNewKeys in both directions.
-type keyingTransport interface {
-	packetConn
-
-	// prepareKeyChange sets up a key change. The key change for a
-	// direction will be effected if a msgNewKeys message is sent
-	// or received.
-	prepareKeyChange(*algorithms, *kexResult) error
-}
-
-// handshakeTransport implements rekeying on top of a keyingTransport
-// and offers a thread-safe writePacket() interface.
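// In practice that means callers treat a handshakeTransport as a plain packet
// pipe: rekeying is invisible, except that writes issued while a key exchange
// is in flight are buffered in pendingPackets (see writePacket below) and
// flushed once msgNewKeys has gone out. A rough sketch of the caller's view,
// illustrative only:
//
//	// any goroutine, at any time:
//	if err := t.writePacket(p); err != nil {
//		// the transport is broken; no retry is possible
//	}
//	// if a kex was in progress, p was queued and is sent afterwards.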
-type handshakeTransport struct { - conn keyingTransport - config *Config - - serverVersion []byte - clientVersion []byte - - // hostKeys is non-empty if we are the server. In that case, - // it contains all host keys that can be used to sign the - // connection. - hostKeys []Signer - - // hostKeyAlgorithms is non-empty if we are the client. In that case, - // we accept these key types from the server as host key. - hostKeyAlgorithms []string - - // On read error, incoming is closed, and readError is set. - incoming chan []byte - readError error - - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. - writePacketsLeft uint32 - writeBytesLeft int64 - - // If the read loop wants to schedule a kex, it pings this - // channel, and the write loop will send out a kex - // message. - requestKex chan struct{} - - // If the other side requests or confirms a kex, its kexInit - // packet is sent here for the write loop to find it. - startKex chan *pendingKex - kexLoopDone chan struct{} // closed (with writeError non-nil) when kexLoop exits - - // data for host key checking - hostKeyCallback HostKeyCallback - dialAddress string - remoteAddr net.Addr - - // bannerCallback is non-empty if we are the client and it has been set in - // ClientConfig. In that case it is called during the user authentication - // dance to handle a custom server's message. - bannerCallback BannerCallback - - // Algorithms agreed in the last key exchange. - algorithms *algorithms - - // Counters exclusively owned by readLoop. - readPacketsLeft uint32 - readBytesLeft int64 - - // The session ID or nil if first kex did not complete yet. - sessionID []byte -} - -type pendingKex struct { - otherInit []byte - done chan error -} - -func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { - t := &handshakeTransport{ - conn: conn, - serverVersion: serverVersion, - clientVersion: clientVersion, - incoming: make(chan []byte, chanSize), - requestKex: make(chan struct{}, 1), - startKex: make(chan *pendingKex), - kexLoopDone: make(chan struct{}), - - config: config, - } - t.resetReadThresholds() - t.resetWriteThresholds() - - // We always start with a mandatory key exchange. - t.requestKex <- struct{}{} - return t -} - -func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.dialAddress = dialAddr - t.remoteAddr = addr - t.hostKeyCallback = config.HostKeyCallback - t.bannerCallback = config.BannerCallback - if config.HostKeyAlgorithms != nil { - t.hostKeyAlgorithms = config.HostKeyAlgorithms - } else { - t.hostKeyAlgorithms = supportedHostKeyAlgos - } - go t.readLoop() - go t.kexLoop() - return t -} - -func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.hostKeys = config.hostKeys - go t.readLoop() - go t.kexLoop() - return t -} - -func (t *handshakeTransport) getSessionID() []byte { - return t.sessionID -} - -// waitSession waits for the session to be established. This should be -// the first thing to call after instantiating handshakeTransport. 
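// A sketch of the expected call order (hypothetical caller, simplified from
// the client connection setup):
//
//	t := newClientTransport(conn, clientVersion, serverVersion, cfg, addr, remoteAddr)
//	if err := t.waitSession(); err != nil {
//		return err // first key exchange failed; the transport is unusable
//	}
//	id := t.getSessionID() // non-nil from here on, stable for the connection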
-func (t *handshakeTransport) waitSession() error { - p, err := t.readPacket() - if err != nil { - return err - } - if p[0] != msgNewKeys { - return fmt.Errorf("ssh: first packet should be msgNewKeys") - } - - return nil -} - -func (t *handshakeTransport) id() string { - if len(t.hostKeys) > 0 { - return "server" - } - return "client" -} - -func (t *handshakeTransport) printPacket(p []byte, write bool) { - action := "got" - if write { - action = "sent" - } - - if p[0] == msgChannelData || p[0] == msgChannelExtendedData { - log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) - } else { - msg, err := decode(p) - log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) - } -} - -func (t *handshakeTransport) readPacket() ([]byte, error) { - p, ok := <-t.incoming - if !ok { - return nil, t.readError - } - return p, nil -} - -func (t *handshakeTransport) readLoop() { - first := true - for { - p, err := t.readOnePacket(first) - first = false - if err != nil { - t.readError = err - close(t.incoming) - break - } - if p[0] == msgIgnore || p[0] == msgDebug { - continue - } - t.incoming <- p - } - - // Stop writers too. - t.recordWriteError(t.readError) - - // Unblock the writer should it wait for this. - close(t.startKex) - - // Don't close t.requestKex; it's also written to from writePacket. -} - -func (t *handshakeTransport) pushPacket(p []byte) error { - if debugHandshake { - t.printPacket(p, true) - } - return t.conn.writePacket(p) -} - -func (t *handshakeTransport) getWriteError() error { - t.mu.Lock() - defer t.mu.Unlock() - return t.writeError -} - -func (t *handshakeTransport) recordWriteError(err error) { - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError == nil && err != nil { - t.writeError = err - } -} - -func (t *handshakeTransport) requestKeyExchange() { - select { - case t.requestKex <- struct{}{}: - default: - // something already requested a kex, so do nothing. - } -} - -func (t *handshakeTransport) resetWriteThresholds() { - t.writePacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.writeBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.writeBytesLeft = t.algorithms.w.rekeyBytes() - } else { - t.writeBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) kexLoop() { - -write: - for t.getWriteError() == nil { - var request *pendingKex - var sent bool - - for request == nil || !sent { - var ok bool - select { - case request, ok = <-t.startKex: - if !ok { - break write - } - case <-t.requestKex: - break - } - - if !sent { - if err := t.sendKexInit(); err != nil { - t.recordWriteError(err) - break - } - sent = true - } - } - - if err := t.getWriteError(); err != nil { - if request != nil { - request.done <- err - } - break - } - - // We're not servicing t.requestKex, but that is OK: - // we never block on sending to t.requestKex. - - // We're not servicing t.startKex, but the remote end - // has just sent us a kexInitMsg, so it can't send - // another key change request, until we close the done - // channel on the pendingKex request. - - err := t.enterKeyExchange(request.otherInit) - - t.mu.Lock() - t.writeError = err - t.sentInitPacket = nil - t.sentInitMsg = nil - - t.resetWriteThresholds() - - // we have completed the key exchange. Since the - // reader is still blocked, it is safe to clear out - // the requestKex channel. 
This avoids the situation - // where: 1) we consumed our own request for the - // initial kex, and 2) the kex from the remote side - // caused another send on the requestKex channel, - clear: - for { - select { - case <-t.requestKex: - // - default: - break clear - } - } - - request.done <- t.writeError - - // kex finished. Push packets that we received while - // the kex was in progress. Don't look at t.startKex - // and don't increment writtenSinceKex: if we trigger - // another kex while we are still busy with the last - // one, things will become very confusing. - for _, p := range t.pendingPackets { - t.writeError = t.pushPacket(p) - if t.writeError != nil { - break - } - } - t.pendingPackets = t.pendingPackets[:0] - t.mu.Unlock() - } - - // Unblock reader. - t.conn.Close() - - // drain startKex channel. We don't service t.requestKex - // because nobody does blocking sends there. - for request := range t.startKex { - request.done <- t.getWriteError() - } - - // Mark that the loop is done so that Close can return. - close(t.kexLoopDone) -} - -// The protocol uses uint32 for packet counters, so we can't let them -// reach 1<<32. We will actually read and write more packets than -// this, though: the other side may send more packets, and after we -// hit this limit on writing we will send a few more packets for the -// key exchange itself. -const packetRekeyThreshold = (1 << 31) - -func (t *handshakeTransport) resetReadThresholds() { - t.readPacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.readBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.readBytesLeft = t.algorithms.r.rekeyBytes() - } else { - t.readBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { - p, err := t.conn.readPacket() - if err != nil { - return nil, err - } - - if t.readPacketsLeft > 0 { - t.readPacketsLeft-- - } else { - t.requestKeyExchange() - } - - if t.readBytesLeft > 0 { - t.readBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if debugHandshake { - t.printPacket(p, false) - } - - if first && p[0] != msgKexInit { - return nil, fmt.Errorf("ssh: first packet should be msgKexInit") - } - - if p[0] != msgKexInit { - return p, nil - } - - firstKex := t.sessionID == nil - - kex := pendingKex{ - done: make(chan error, 1), - otherInit: p, - } - t.startKex <- &kex - err = <-kex.done - - if debugHandshake { - log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) - } - - if err != nil { - return nil, err - } - - t.resetReadThresholds() - - // By default, a key exchange is hidden from higher layers by - // translating it into msgIgnore. - successPacket := []byte{msgIgnore} - if firstKex { - // sendKexInit() for the first kex waits for - // msgNewKeys so the authentication process is - // guaranteed to happen over an encrypted transport. - successPacket = []byte{msgNewKeys} - } - - return successPacket, nil -} - -// sendKexInit sends a key change message. -func (t *handshakeTransport) sendKexInit() error { - t.mu.Lock() - defer t.mu.Unlock() - if t.sentInitMsg != nil { - // kexInits may be sent either in response to the other side, - // or because our side wants to initiate a key change, so we - // may have already sent a kexInit. In that case, don't send a - // second kexInit. 
-		return nil
-	}
-
-	msg := &kexInitMsg{
-		KexAlgos:                t.config.KeyExchanges,
-		CiphersClientServer:     t.config.Ciphers,
-		CiphersServerClient:     t.config.Ciphers,
-		MACsClientServer:        t.config.MACs,
-		MACsServerClient:        t.config.MACs,
-		CompressionClientServer: supportedCompressions,
-		CompressionServerClient: supportedCompressions,
-	}
-	io.ReadFull(rand.Reader, msg.Cookie[:])
-
-	isServer := len(t.hostKeys) > 0
-	if isServer {
-		for _, k := range t.hostKeys {
-			// If k is an AlgorithmSigner, presume it supports all signature algorithms
-			// associated with the key format. (Ideally AlgorithmSigner would have a
-			// method to advertise supported algorithms, but it doesn't. This means that
-			// adding support for a new algorithm is a breaking change, as we will
-			// immediately negotiate it even if existing implementations don't support
-			// it. If that ever happens, we'll have to figure something out.)
-			// If k is not an AlgorithmSigner, we can only assume it supports just the
-			// algorithms that match the key format. (This means that Sign can't pick
-			// a different default.)
-			keyFormat := k.PublicKey().Type()
-			if _, ok := k.(AlgorithmSigner); ok {
-				msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...)
-			} else {
-				msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat)
-			}
-		}
-	} else {
-		msg.ServerHostKeyAlgos = t.hostKeyAlgorithms
-
-		// As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what
-		// algorithms the server supports for public key authentication. See RFC
-		// 8308, Section 2.1.
-		if firstKeyExchange := t.sessionID == nil; firstKeyExchange {
-			msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1)
-			msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...)
-			msg.KexAlgos = append(msg.KexAlgos, "ext-info-c")
-		}
-	}
-
-	packet := Marshal(msg)
-
-	// writePacket destroys the contents, so save a copy.
-	packetCopy := make([]byte, len(packet))
-	copy(packetCopy, packet)
-
-	if err := t.pushPacket(packetCopy); err != nil {
-		return err
-	}
-
-	t.sentInitMsg = msg
-	t.sentInitPacket = packet
-
-	return nil
-}
-
-func (t *handshakeTransport) writePacket(p []byte) error {
-	switch p[0] {
-	case msgKexInit:
-		return errors.New("ssh: only handshakeTransport can send kexInit")
-	case msgNewKeys:
-		return errors.New("ssh: only handshakeTransport can send newKeys")
-	}
-
-	t.mu.Lock()
-	defer t.mu.Unlock()
-	if t.writeError != nil {
-		return t.writeError
-	}
-
-	if t.sentInitMsg != nil {
-		// Copy the packet so the writer can reuse the buffer.
-		cp := make([]byte, len(p))
-		copy(cp, p)
-		t.pendingPackets = append(t.pendingPackets, cp)
-		return nil
-	}
-
-	if t.writeBytesLeft > 0 {
-		t.writeBytesLeft -= int64(len(p))
-	} else {
-		t.requestKeyExchange()
-	}
-
-	if t.writePacketsLeft > 0 {
-		t.writePacketsLeft--
-	} else {
-		t.requestKeyExchange()
-	}
-
-	if err := t.pushPacket(p); err != nil {
-		t.writeError = err
-	}
-
-	return nil
-}
-
-func (t *handshakeTransport) Close() error {
-	// Close the connection. This should cause the readLoop goroutine to wake up
-	// and close t.startKex, which will shut down kexLoop if running.
-	err := t.conn.Close()
-
-	// Wait for the kexLoop goroutine to complete.
-	// At that point we know that the readLoop goroutine is complete too,
-	// because kexLoop itself waits for readLoop to close the startKex channel.
- <-t.kexLoopDone - - return err -} - -func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { - if debugHandshake { - log.Printf("%s entered key exchange", t.id()) - } - - otherInit := &kexInitMsg{} - if err := Unmarshal(otherInitPacket, otherInit); err != nil { - return err - } - - magics := handshakeMagics{ - clientVersion: t.clientVersion, - serverVersion: t.serverVersion, - clientKexInit: otherInitPacket, - serverKexInit: t.sentInitPacket, - } - - clientInit := otherInit - serverInit := t.sentInitMsg - isClient := len(t.hostKeys) == 0 - if isClient { - clientInit, serverInit = serverInit, clientInit - - magics.clientKexInit = t.sentInitPacket - magics.serverKexInit = otherInitPacket - } - - var err error - t.algorithms, err = findAgreedAlgorithms(isClient, clientInit, serverInit) - if err != nil { - return err - } - - // We don't send FirstKexFollows, but we handle receiving it. - // - // RFC 4253 section 7 defines the kex and the agreement method for - // first_kex_packet_follows. It states that the guessed packet - // should be ignored if the "kex algorithm and/or the host - // key algorithm is guessed wrong (server and client have - // different preferred algorithm), or if any of the other - // algorithms cannot be agreed upon". The other algorithms have - // already been checked above so the kex algorithm and host key - // algorithm are checked here. - if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { - // other side sent a kex message for the wrong algorithm, - // which we have to ignore. - if _, err := t.conn.readPacket(); err != nil { - return err - } - } - - kex, ok := kexAlgoMap[t.algorithms.kex] - if !ok { - return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) - } - - var result *kexResult - if len(t.hostKeys) > 0 { - result, err = t.server(kex, &magics) - } else { - result, err = t.client(kex, &magics) - } - - if err != nil { - return err - } - - firstKeyExchange := t.sessionID == nil - if firstKeyExchange { - t.sessionID = result.H - } - result.SessionID = t.sessionID - - if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil { - return err - } - if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { - return err - } - - // On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO - // message with the server-sig-algs extension if the client supports it. See - // RFC 8308, Sections 2.4 and 3.1. - if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") { - extInfo := &extInfoMsg{ - NumExtensions: 1, - Payload: make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)), - } - extInfo.Payload = appendInt(extInfo.Payload, len("server-sig-algs")) - extInfo.Payload = append(extInfo.Payload, "server-sig-algs"...) - extInfo.Payload = appendInt(extInfo.Payload, len(supportedPubKeyAuthAlgosList)) - extInfo.Payload = append(extInfo.Payload, supportedPubKeyAuthAlgosList...) - if err := t.conn.writePacket(Marshal(extInfo)); err != nil { - return err - } - } - - if packet, err := t.conn.readPacket(); err != nil { - return err - } else if packet[0] != msgNewKeys { - return unexpectedMessageError(msgNewKeys, packet[0]) - } - - return nil -} - -// algorithmSignerWrapper is an AlgorithmSigner that only supports the default -// key format algorithm. 
-//
-// This is technically a violation of the AlgorithmSigner interface, but it
-// should be unreachable given where we use this. Anyway, at least it returns an
-// error instead of panicking or producing an incorrect signature.
-type algorithmSignerWrapper struct {
-	Signer
-}
-
-func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
-	if algorithm != underlyingAlgo(a.PublicKey().Type()) {
-		return nil, errors.New("ssh: internal error: algorithmSignerWrapper invoked with non-default algorithm")
-	}
-	return a.Sign(rand, data)
-}
-
-func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner {
-	for _, k := range hostKeys {
-		if algo == k.PublicKey().Type() {
-			return algorithmSignerWrapper{k}
-		}
-		k, ok := k.(AlgorithmSigner)
-		if !ok {
-			continue
-		}
-		for _, a := range algorithmsForKeyFormat(k.PublicKey().Type()) {
-			if algo == a {
-				return k
-			}
-		}
-	}
-	return nil
-}
-
-func (t *handshakeTransport) server(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) {
-	hostKey := pickHostKey(t.hostKeys, t.algorithms.hostKey)
-	if hostKey == nil {
-		return nil, errors.New("ssh: internal error: negotiated unsupported signature type")
-	}
-
-	r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.hostKey)
-	return r, err
-}
-
-func (t *handshakeTransport) client(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) {
-	result, err := kex.Client(t.conn, t.config.Rand, magics)
-	if err != nil {
-		return nil, err
-	}
-
-	hostKey, err := ParsePublicKey(result.HostKey)
-	if err != nil {
-		return nil, err
-	}
-
-	if err := verifyHostKeySignature(hostKey, t.algorithms.hostKey, result); err != nil {
-		return nil, err
-	}
-
-	err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey)
-	if err != nil {
-		return nil, err
-	}
-
-	return result, nil
-}
diff --git a/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go b/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go
deleted file mode 100644
index af81d266..00000000
--- a/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package bcrypt_pbkdf implements bcrypt_pbkdf(3) from OpenBSD.
-//
-// See https://flak.tedunangst.com/post/bcrypt-pbkdf and
-// https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c.
-package bcrypt_pbkdf
-
-import (
-	"crypto/sha512"
-	"errors"
-	"golang.org/x/crypto/blowfish"
-)
-
-const blockSize = 32
-
-// Key derives a key from the password, salt and rounds count, returning a
-// []byte of length keyLen that can be used as a cryptographic key.
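// A typical invocation, roughly mirroring how the ssh package decrypts
// OpenSSH-format private keys (the parameter values here are illustrative,
// not mandated by this function):
//
//	dk, err := Key(passphrase, salt, 16, 32+16) // 16 rounds; AES-256 key plus IV
//	if err != nil {
//		// invalid rounds, password, salt or keyLen
//	}
//	aesKey, iv := dk[:32], dk[32:]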
-func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) { - if rounds < 1 { - return nil, errors.New("bcrypt_pbkdf: number of rounds is too small") - } - if len(password) == 0 { - return nil, errors.New("bcrypt_pbkdf: empty password") - } - if len(salt) == 0 || len(salt) > 1<<20 { - return nil, errors.New("bcrypt_pbkdf: bad salt length") - } - if keyLen > 1024 { - return nil, errors.New("bcrypt_pbkdf: keyLen is too large") - } - - numBlocks := (keyLen + blockSize - 1) / blockSize - key := make([]byte, numBlocks*blockSize) - - h := sha512.New() - h.Write(password) - shapass := h.Sum(nil) - - shasalt := make([]byte, 0, sha512.Size) - cnt, tmp := make([]byte, 4), make([]byte, blockSize) - for block := 1; block <= numBlocks; block++ { - h.Reset() - h.Write(salt) - cnt[0] = byte(block >> 24) - cnt[1] = byte(block >> 16) - cnt[2] = byte(block >> 8) - cnt[3] = byte(block) - h.Write(cnt) - bcryptHash(tmp, shapass, h.Sum(shasalt)) - - out := make([]byte, blockSize) - copy(out, tmp) - for i := 2; i <= rounds; i++ { - h.Reset() - h.Write(tmp) - bcryptHash(tmp, shapass, h.Sum(shasalt)) - for j := 0; j < len(out); j++ { - out[j] ^= tmp[j] - } - } - - for i, v := range out { - key[i*numBlocks+(block-1)] = v - } - } - return key[:keyLen], nil -} - -var magic = []byte("OxychromaticBlowfishSwatDynamite") - -func bcryptHash(out, shapass, shasalt []byte) { - c, err := blowfish.NewSaltedCipher(shapass, shasalt) - if err != nil { - panic(err) - } - for i := 0; i < 64; i++ { - blowfish.ExpandKey(shasalt, c) - blowfish.ExpandKey(shapass, c) - } - copy(out, magic) - for i := 0; i < 32; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(out[i:i+8], out[i:i+8]) - } - } - // Swap bytes due to different endianness. - for i := 0; i < 32; i += 4 { - out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3] - } -} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go deleted file mode 100644 index 927a90cd..00000000 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ /dev/null @@ -1,774 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - - "golang.org/x/crypto/curve25519" -) - -const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256LibSSH = "curve25519-sha256@libssh.org" - kexAlgoCurve25519SHA256 = "curve25519-sha256" - - // For the following kex only the client half contains a production - // ready implementation. The server half only consists of a minimal - // implementation to satisfy the automated tests. - kexAlgoDHGEXSHA1 = "diffie-hellman-group-exchange-sha1" - kexAlgoDHGEXSHA256 = "diffie-hellman-group-exchange-sha256" -) - -// kexResult captures the outcome of a key exchange. -type kexResult struct { - // Session hash. See also RFC 4253, section 8. - H []byte - - // Shared secret. See also RFC 4253, section 8. - K []byte - - // Host key as hashed into H. - HostKey []byte - - // Signature of H. - Signature []byte - - // A cryptographic hash function that matches the security - // level of the key exchange algorithm. 
It is used for - // calculating H, and for deriving keys from H and K. - Hash crypto.Hash - - // The session ID, which is the first H computed. This is used - // to derive key material inside the transport. - SessionID []byte -} - -// handshakeMagics contains data that is always included in the -// session hash. -type handshakeMagics struct { - clientVersion, serverVersion []byte - clientKexInit, serverKexInit []byte -} - -func (m *handshakeMagics) write(w io.Writer) { - writeString(w, m.clientVersion) - writeString(w, m.serverVersion) - writeString(w, m.clientKexInit) - writeString(w, m.serverKexInit) -} - -// kexAlgorithm abstracts different key exchange algorithms. -type kexAlgorithm interface { - // Server runs server-side key agreement, signing the result - // with a hostkey. algo is the negotiated algorithm, and may - // be a certificate type. - Server(p packetConn, rand io.Reader, magics *handshakeMagics, s AlgorithmSigner, algo string) (*kexResult, error) - - // Client runs the client-side key agreement. Caller is - // responsible for verifying the host key signature. - Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) -} - -// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. -type dhGroup struct { - g, p, pMinus1 *big.Int - hashFunc crypto.Hash -} - -func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil -} - -func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - var x *big.Int - for { - var err error - if x, err = rand.Int(randSource, group.pMinus1); err != nil { - return nil, err - } - if x.Sign() > 0 { - break - } - } - - X := new(big.Int).Exp(group.g, x, group.p) - kexDHInit := kexDHInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHInit)); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexDHReply kexDHReplyMsg - if err = Unmarshal(packet, &kexDHReply); err != nil { - return nil, err - } - - ki, err := group.diffieHellman(kexDHReply.Y, x) - if err != nil { - return nil, err - } - - h := group.hashFunc.New() - magics.write(h) - writeString(h, kexDHReply.HostKey) - writeInt(h, X) - writeInt(h, kexDHReply.Y) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHReply.HostKey, - Signature: kexDHReply.Signature, - Hash: group.hashFunc, - }, nil -} - -func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHInit kexDHInitMsg - if err = Unmarshal(packet, &kexDHInit); err != nil { - return - } - - var y *big.Int - for { - if y, err = rand.Int(randSource, group.pMinus1); err != nil { - return - } - if y.Sign() > 0 { - break - } - } - - Y := new(big.Int).Exp(group.g, y, group.p) - ki, err := group.diffieHellman(kexDHInit.X, y) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := group.hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeInt(h, kexDHInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - 
h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, randSource, H, algo) - if err != nil { - return nil, err - } - - kexDHReply := kexDHReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHReply) - - err = c.writePacket(packet) - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: group.hashFunc, - }, err -} - -// ecdh performs Elliptic Curve Diffie-Hellman key exchange as -// described in RFC 5656, section 4. -type ecdh struct { - curve elliptic.Curve -} - -func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - kexInit := kexECDHInitMsg{ - ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), - } - - serialized := Marshal(&kexInit) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - - x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) - if err != nil { - return nil, err - } - - // generate shared secret - secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kexInit.ClientPubKey) - writeString(h, reply.EphemeralPubKey) - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: ecHash(kex.curve), - }, nil -} - -// unmarshalECKey parses and checks an EC key. -func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { - x, y = elliptic.Unmarshal(curve, pubkey) - if x == nil { - return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") - } - if !validateECPublicKey(curve, x, y) { - return nil, nil, errors.New("ssh: public key not on curve") - } - return x, y, nil -} - -// validateECPublicKey checks that the point is a valid public key for -// the given curve. See [SEC1], 3.2.2 -func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { - if x.Sign() == 0 && y.Sign() == 0 { - return false - } - - if x.Cmp(curve.Params().P) >= 0 { - return false - } - - if y.Cmp(curve.Params().P) >= 0 { - return false - } - - if !curve.IsOnCurve(x, y) { - return false - } - - // We don't check if N * PubKey == 0, since - // - // - the NIST curves have cofactor = 1, so this is implicit. - // (We don't foresee an implementation that supports non NIST - // curves) - // - // - for ephemeral keys, we don't need to worry about small - // subgroup attacks. - return true -} - -func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexECDHInit kexECDHInitMsg - if err = Unmarshal(packet, &kexECDHInit); err != nil { - return nil, err - } - - clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) - if err != nil { - return nil, err - } - - // We could cache this key across multiple users/multiple - // connection attempts, but the benefit is small. 
OpenSSH - // generates a new key for each incoming connection. - ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) - - // generate shared secret - secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexECDHInit.ClientPubKey) - writeString(h, serializedEphKey) - - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, rand, H, algo) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: serializedEphKey, - HostKey: hostKeyBytes, - Signature: sig, - } - - serialized := Marshal(&reply) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - return &kexResult{ - H: H, - K: K, - HostKey: reply.HostKey, - Signature: sig, - Hash: ecHash(kex.curve), - }, nil -} - -// ecHash returns the hash to match the given elliptic curve, see RFC -// 5656, section 6.2.1 -func ecHash(curve elliptic.Curve) crypto.Hash { - bitSize := curve.Params().BitSize - switch { - case bitSize <= 256: - return crypto.SHA256 - case bitSize <= 384: - return crypto.SHA384 - } - return crypto.SHA512 -} - -var kexAlgoMap = map[string]kexAlgorithm{} - -func init() { - // This is the group called diffie-hellman-group1-sha1 in - // RFC 4253 and Oakley Group 2 in RFC 2409. - p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) - kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - hashFunc: crypto.SHA1, - } - - // This are the groups called diffie-hellman-group14-sha1 and - // diffie-hellman-group14-sha256 in RFC 4253 and RFC 8268, - // and Oakley Group 14 in RFC 3526. 
- p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - group14 := &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ - g: group14.g, p: group14.p, pMinus1: group14.pMinus1, - hashFunc: crypto.SHA1, - } - kexAlgoMap[kexAlgoDH14SHA256] = &dhGroup{ - g: group14.g, p: group14.p, pMinus1: group14.pMinus1, - hashFunc: crypto.SHA256, - } - - kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} - kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} - kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} - kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} - kexAlgoMap[kexAlgoCurve25519SHA256LibSSH] = &curve25519sha256{} - kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} - kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} -} - -// curve25519sha256 implements the curve25519-sha256 (formerly known as -// curve25519-sha256@libssh.org) key exchange method, as described in RFC 8731. -type curve25519sha256 struct{} - -type curve25519KeyPair struct { - priv [32]byte - pub [32]byte -} - -func (kp *curve25519KeyPair) generate(rand io.Reader) error { - if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { - return err - } - curve25519.ScalarBaseMult(&kp.pub, &kp.priv) - return nil -} - -// curve25519Zeros is just an array of 32 zero bytes so that we have something -// convenient to compare against in order to reject curve25519 points with the -// wrong order. 
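// Both the Client and Server paths below apply the check the same way once
// the shared secret is computed. Condensed (per RFC 8731, Section 3, which
// requires aborting on an all-zero shared secret; peerPub stands for the
// value copied out of the peer's kex message):
//
//	curve25519.ScalarMult(&secret, &kp.priv, &peerPub)
//	if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
//		// the peer sent a low-order point; abort the handshake
//	}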
-var curve25519Zeros [32]byte - -func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - if len(reply.EphemeralPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var servPub, secret [32]byte - copy(servPub[:], reply.EphemeralPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &servPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kp.pub[:]) - writeString(h, reply.EphemeralPubKey) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: crypto.SHA256, - }, nil -} - -func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return - } - var kexInit kexECDHInitMsg - if err = Unmarshal(packet, &kexInit); err != nil { - return - } - - if len(kexInit.ClientPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - - var clientPub, secret [32]byte - copy(clientPub[:], kexInit.ClientPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &clientPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexInit.ClientPubKey) - writeString(h, kp.pub[:]) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - sig, err := signAndMarshal(priv, rand, H, algo) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: kp.pub[:], - HostKey: hostKeyBytes, - Signature: sig, - } - if err := c.writePacket(Marshal(&reply)); err != nil { - return nil, err - } - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA256, - }, nil -} - -// dhGEXSHA implements the diffie-hellman-group-exchange-sha1 and -// diffie-hellman-group-exchange-sha256 key agreement protocols, -// as described in RFC 4419 -type dhGEXSHA struct { - hashFunc crypto.Hash -} - -const ( - dhGroupExchangeMinimumBits = 2048 - dhGroupExchangePreferredBits = 2048 - dhGroupExchangeMaximumBits = 8192 -) - -func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - // Send GexRequest - kexDHGexRequest := kexDHGexRequestMsg{ - MinBits: dhGroupExchangeMinimumBits, - PreferedBits: dhGroupExchangePreferredBits, - MaxBits: dhGroupExchangeMaximumBits, - } - if err := 
c.writePacket(Marshal(&kexDHGexRequest)); err != nil { - return nil, err - } - - // Receive GexGroup - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var msg kexDHGexGroupMsg - if err = Unmarshal(packet, &msg); err != nil { - return nil, err - } - - // reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits - if msg.P.BitLen() < dhGroupExchangeMinimumBits || msg.P.BitLen() > dhGroupExchangeMaximumBits { - return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", msg.P.BitLen()) - } - - // Check if g is safe by verifying that 1 < g < p-1 - pMinusOne := new(big.Int).Sub(msg.P, bigOne) - if msg.G.Cmp(bigOne) <= 0 || msg.G.Cmp(pMinusOne) >= 0 { - return nil, fmt.Errorf("ssh: server provided gex g is not safe") - } - - // Send GexInit - pHalf := new(big.Int).Rsh(msg.P, 1) - x, err := rand.Int(randSource, pHalf) - if err != nil { - return nil, err - } - X := new(big.Int).Exp(msg.G, x, msg.P) - kexDHGexInit := kexDHGexInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHGexInit)); err != nil { - return nil, err - } - - // Receive GexReply - packet, err = c.readPacket() - if err != nil { - return nil, err - } - - var kexDHGexReply kexDHGexReplyMsg - if err = Unmarshal(packet, &kexDHGexReply); err != nil { - return nil, err - } - - if kexDHGexReply.Y.Cmp(bigOne) <= 0 || kexDHGexReply.Y.Cmp(pMinusOne) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - kInt := new(big.Int).Exp(kexDHGexReply.Y, x, msg.P) - - // Check if k is safe by verifying that k > 1 and k < p - 1 - if kInt.Cmp(bigOne) <= 0 || kInt.Cmp(pMinusOne) >= 0 { - return nil, fmt.Errorf("ssh: derived k is not safe") - } - - h := gex.hashFunc.New() - magics.write(h) - writeString(h, kexDHGexReply.HostKey) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, msg.P) - writeInt(h, msg.G) - writeInt(h, X) - writeInt(h, kexDHGexReply.Y) - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHGexReply.HostKey, - Signature: kexDHGexReply.Signature, - Hash: gex.hashFunc, - }, nil -} - -// Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. -// -// This is a minimal implementation to satisfy the automated tests. -func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { - // Receive GexRequest - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHGexRequest kexDHGexRequestMsg - if err = Unmarshal(packet, &kexDHGexRequest); err != nil { - return - } - - // Send GexGroup - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. 
- p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - g := big.NewInt(2) - - msg := &kexDHGexGroupMsg{ - P: p, - G: g, - } - if err := c.writePacket(Marshal(msg)); err != nil { - return nil, err - } - - // Receive GexInit - packet, err = c.readPacket() - if err != nil { - return - } - var kexDHGexInit kexDHGexInitMsg - if err = Unmarshal(packet, &kexDHGexInit); err != nil { - return - } - - pHalf := new(big.Int).Rsh(p, 1) - - y, err := rand.Int(randSource, pHalf) - if err != nil { - return - } - Y := new(big.Int).Exp(g, y, p) - - pMinusOne := new(big.Int).Sub(p, bigOne) - if kexDHGexInit.X.Cmp(bigOne) <= 0 || kexDHGexInit.X.Cmp(pMinusOne) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - kInt := new(big.Int).Exp(kexDHGexInit.X, y, p) - - hostKeyBytes := priv.PublicKey().Marshal() - - h := gex.hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, p) - writeInt(h, g) - writeInt(h, kexDHGexInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, randSource, H, algo) - if err != nil { - return nil, err - } - - kexDHGexReply := kexDHGexReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHGexReply) - - err = c.writePacket(packet) - - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: gex.hashFunc, - }, err -} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go deleted file mode 100644 index 72969804..00000000 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ /dev/null @@ -1,1447 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "crypto" - "crypto/aes" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/md5" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" - "strings" - - "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" -) - -// Public key algorithms names. These values can appear in PublicKey.Type, -// ClientConfig.HostKeyAlgorithms, Signature.Format, or as AlgorithmSigner -// arguments. 
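// For example, a client can pin which host key algorithms it will accept
// using these names (a sketch against this package's public API; the user
// name and trustedHostKey are placeholders):
//
//	cfg := &ClientConfig{
//		User:              "git",
//		HostKeyAlgorithms: []string{KeyAlgoED25519, KeyAlgoRSASHA512},
//		HostKeyCallback:   FixedHostKey(trustedHostKey),
//	}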
-const ( - KeyAlgoRSA = "ssh-rsa" - KeyAlgoDSA = "ssh-dss" - KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" - KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com" - KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" - KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" - KeyAlgoED25519 = "ssh-ed25519" - KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" - - // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, not - // public key formats, so they can't appear as a PublicKey.Type. The - // corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2. - KeyAlgoRSASHA256 = "rsa-sha2-256" - KeyAlgoRSASHA512 = "rsa-sha2-512" -) - -const ( - // Deprecated: use KeyAlgoRSA. - SigAlgoRSA = KeyAlgoRSA - // Deprecated: use KeyAlgoRSASHA256. - SigAlgoRSASHA2256 = KeyAlgoRSASHA256 - // Deprecated: use KeyAlgoRSASHA512. - SigAlgoRSASHA2512 = KeyAlgoRSASHA512 -) - -// parsePubKey parses a public key of the given algorithm. -// Use ParsePublicKey for keys with prepended algorithm. -func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { - switch algo { - case KeyAlgoRSA: - return parseRSA(in) - case KeyAlgoDSA: - return parseDSA(in) - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - return parseECDSA(in) - case KeyAlgoSKECDSA256: - return parseSKECDSA(in) - case KeyAlgoED25519: - return parseED25519(in) - case KeyAlgoSKED25519: - return parseSKEd25519(in) - case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: - cert, err := parseCert(in, certKeyAlgoNames[algo]) - if err != nil { - return nil, nil, err - } - return cert, nil, nil - } - return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) -} - -// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format -// (see sshd(8) manual page) once the options and key type fields have been -// removed. -func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { - in = bytes.TrimSpace(in) - - i := bytes.IndexAny(in, " \t") - if i == -1 { - i = len(in) - } - base64Key := in[:i] - - key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) - n, err := base64.StdEncoding.Decode(key, base64Key) - if err != nil { - return nil, "", err - } - key = key[:n] - out, err = ParsePublicKey(key) - if err != nil { - return nil, "", err - } - comment = string(bytes.TrimSpace(in[i:])) - return out, comment, nil -} - -// ParseKnownHosts parses an entry in the format of the known_hosts file. -// -// The known_hosts format is documented in the sshd(8) manual page. This -// function will parse a single entry from in. On successful return, marker -// will contain the optional marker value (i.e. "cert-authority" or "revoked") -// or else be empty, hosts will contain the hosts that this entry matches, -// pubKey will contain the public key and comment will contain any trailing -// comment at the end of the line. See the sshd(8) manual page for the various -// forms that a host string can take. -// -// The unparsed remainder of the input will be returned in rest. This function -// can be called repeatedly to parse multiple entries. -// -// If no entries were found in the input then err will be io.EOF. Otherwise a -// non-nil err value indicates a parse error. 
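// Because the unparsed remainder is returned, a whole known_hosts file is
// consumed with a loop along these lines (sketch; data holds the file
// contents):
//
//	rest := data
//	for {
//		_, hosts, pubKey, _, r, err := ParseKnownHosts(rest)
//		if err == io.EOF {
//			break // no more entries
//		}
//		if err != nil {
//			return err // malformed entry
//		}
//		fmt.Println(hosts, pubKey.Type())
//		rest = r
//	}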
-func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - // Strip out the beginning of the known_host key. - // This is either an optional marker or a (set of) hostname(s). - keyFields := bytes.Fields(in) - if len(keyFields) < 3 || len(keyFields) > 5 { - return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") - } - - // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated - // list of hosts - marker := "" - if keyFields[0][0] == '@' { - marker = string(keyFields[0][1:]) - keyFields = keyFields[1:] - } - - hosts := string(keyFields[0]) - // keyFields[1] contains the key type (e.g. “ssh-rsa”). - // However, that information is duplicated inside the - // base64-encoded key and so is ignored here. - - key := bytes.Join(keyFields[2:], []byte(" ")) - if pubKey, comment, err = parseAuthorizedKey(key); err != nil { - return "", nil, nil, "", nil, err - } - - return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil - } - - return "", nil, nil, "", nil, io.EOF -} - -// ParseAuthorizedKey parses a public key from an authorized_keys -// file used in OpenSSH according to the sshd(8) manual page. -func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - return out, comment, options, rest, nil - } - - // No key type recognised. Maybe there's an options field at - // the beginning. - var b byte - inQuote := false - var candidateOptions []string - optionStart := 0 - for i, b = range in { - isEnd := !inQuote && (b == ' ' || b == '\t') - if (b == ',' && !inQuote) || isEnd { - if i-optionStart > 0 { - candidateOptions = append(candidateOptions, string(in[optionStart:i])) - } - optionStart = i + 1 - } - if isEnd { - break - } - if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { - inQuote = !inQuote - } - } - for i < len(in) && (in[i] == ' ' || in[i] == '\t') { - i++ - } - if i == len(in) { - // Invalid line: unmatched quote - in = rest - continue - } - - in = in[i:] - i = bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - options = candidateOptions - return out, comment, options, rest, nil - } - - in = rest - continue - } - - return nil, "", nil, nil, errors.New("ssh: no key found") -} - -// ParsePublicKey parses an SSH public key formatted for use in -// the SSH wire protocol according to RFC 4253, section 6.6. 
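// The input here is the binary wire encoding, i.e. exactly what
// PublicKey.Marshal produces, not the base64 authorized_keys form (for that,
// use ParseAuthorizedKey above). A round-trip sketch:
//
//	wire := pub.Marshal()
//	pub2, err := ParsePublicKey(wire)
//	if err == nil {
//		_ = pub2.Type() // same format name as pub.Type()
//	}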
-func ParsePublicKey(in []byte) (out PublicKey, err error) { - algo, in, ok := parseString(in) - if !ok { - return nil, errShortRead - } - var rest []byte - out, rest, err = parsePubKey(in, string(algo)) - if len(rest) > 0 { - return nil, errors.New("ssh: trailing junk in public key") - } - - return out, err -} - -// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH -// authorized_keys file. The return value ends with newline. -func MarshalAuthorizedKey(key PublicKey) []byte { - b := &bytes.Buffer{} - b.WriteString(key.Type()) - b.WriteByte(' ') - e := base64.NewEncoder(base64.StdEncoding, b) - e.Write(key.Marshal()) - e.Close() - b.WriteByte('\n') - return b.Bytes() -} - -// PublicKey represents a public key using an unspecified algorithm. -// -// Some PublicKeys provided by this package also implement CryptoPublicKey. -type PublicKey interface { - // Type returns the key format name, e.g. "ssh-rsa". - Type() string - - // Marshal returns the serialized key data in SSH wire format, with the name - // prefix. To unmarshal the returned data, use the ParsePublicKey function. - Marshal() []byte - - // Verify that sig is a signature on the given data using this key. This - // method will hash the data appropriately first. sig.Format is allowed to - // be any signature algorithm compatible with the key type, the caller - // should check if it has more stringent requirements. - Verify(data []byte, sig *Signature) error -} - -// CryptoPublicKey, if implemented by a PublicKey, -// returns the underlying crypto.PublicKey form of the key. -type CryptoPublicKey interface { - CryptoPublicKey() crypto.PublicKey -} - -// A Signer can create signatures that verify against a public key. -// -// Some Signers provided by this package also implement AlgorithmSigner. -type Signer interface { - // PublicKey returns the associated PublicKey. - PublicKey() PublicKey - - // Sign returns a signature for the given data. This method will hash the - // data appropriately first. The signature algorithm is expected to match - // the key format returned by the PublicKey.Type method (and not to be any - // alternative algorithm supported by the key format). - Sign(rand io.Reader, data []byte) (*Signature, error) -} - -// An AlgorithmSigner is a Signer that also supports specifying an algorithm to -// use for signing. -// -// An AlgorithmSigner can't advertise the algorithms it supports, so it should -// be prepared to be invoked with every algorithm supported by the public key -// format. -type AlgorithmSigner interface { - Signer - - // SignWithAlgorithm is like Signer.Sign, but allows specifying a desired - // signing algorithm. Callers may pass an empty string for the algorithm in - // which case the AlgorithmSigner will use a default algorithm. This default - // doesn't currently control any behavior in this package. - SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) -} - -type rsaPublicKey rsa.PublicKey - -func (r *rsaPublicKey) Type() string { - return "ssh-rsa" -} - -// parseRSA parses an RSA key according to RFC 4253, section 6.6. 
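// By the time parseRSA runs, parsePubKey has already consumed the leading
// algorithm name, so only the two multiple-precision integers from RFC 4253,
// section 6.6 remain:
//
//	string  "ssh-rsa"   (already stripped)
//	mpint   e           (public exponent)
//	mpint   n           (modulus)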
-func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - E *big.Int - N *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if w.E.BitLen() > 24 { - return nil, nil, errors.New("ssh: exponent too large") - } - e := w.E.Int64() - if e < 3 || e&1 == 0 { - return nil, nil, errors.New("ssh: incorrect exponent") - } - - var key rsa.PublicKey - key.E = int(e) - key.N = w.N - return (*rsaPublicKey)(&key), w.Rest, nil -} - -func (r *rsaPublicKey) Marshal() []byte { - e := new(big.Int).SetInt64(int64(r.E)) - // RSA publickey struct layout should match the struct used by - // parseRSACert in the x/crypto/ssh/agent package. - wirekey := struct { - Name string - E *big.Int - N *big.Int - }{ - KeyAlgoRSA, - e, - r.N, - } - return Marshal(&wirekey) -} - -func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { - supportedAlgos := algorithmsForKeyFormat(r.Type()) - if !contains(supportedAlgos, sig.Format) { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) - } - hash := hashFuncs[sig.Format] - h := hash.New() - h.Write(data) - digest := h.Sum(nil) - return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, sig.Blob) -} - -func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*rsa.PublicKey)(r) -} - -type dsaPublicKey dsa.PublicKey - -func (k *dsaPublicKey) Type() string { - return "ssh-dss" -} - -func checkDSAParams(param *dsa.Parameters) error { - // SSH specifies FIPS 186-2, which only provided a single size - // (1024 bits) DSA key. FIPS 186-3 allows for larger key - // sizes, which would confuse SSH. - if l := param.P.BitLen(); l != 1024 { - return fmt.Errorf("ssh: unsupported DSA key size %d", l) - } - - return nil -} - -// parseDSA parses a DSA key according to RFC 4253, section 6.6. -func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - P, Q, G, Y *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - param := dsa.Parameters{ - P: w.P, - Q: w.Q, - G: w.G, - } - if err := checkDSAParams(&param); err != nil { - return nil, nil, err - } - - key := &dsaPublicKey{ - Parameters: param, - Y: w.Y, - } - return key, w.Rest, nil -} - -func (k *dsaPublicKey) Marshal() []byte { - // DSA publickey struct layout should match the struct used by - // parseDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - P, Q, G, Y *big.Int - }{ - k.Type(), - k.P, - k.Q, - k.G, - k.Y, - } - - return Marshal(&w) -} - -func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - h := hashFuncs[sig.Format].New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 4253, section 6.6, - // The value for 'dss_signature_blob' is encoded as a string containing - // r, followed by s (which are 160-bit integers, without lengths or - // padding, unsigned, and in network byte order). - // For DSS purposes, sig.Blob should be exactly 40 bytes in length. 
- if len(sig.Blob) != 40 { - return errors.New("ssh: DSA signature parse error") - } - r := new(big.Int).SetBytes(sig.Blob[:20]) - s := new(big.Int).SetBytes(sig.Blob[20:]) - if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*dsa.PublicKey)(k) -} - -type dsaPrivateKey struct { - *dsa.PrivateKey -} - -func (k *dsaPrivateKey) PublicKey() PublicKey { - return (*dsaPublicKey)(&k.PrivateKey.PublicKey) -} - -func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - return k.SignWithAlgorithm(rand, data, k.PublicKey().Type()) -} - -func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - if algorithm != "" && algorithm != k.PublicKey().Type() { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - - h := hashFuncs[k.PublicKey().Type()].New() - h.Write(data) - digest := h.Sum(nil) - r, s, err := dsa.Sign(rand, k.PrivateKey, digest) - if err != nil { - return nil, err - } - - sig := make([]byte, 40) - rb := r.Bytes() - sb := s.Bytes() - - copy(sig[20-len(rb):20], rb) - copy(sig[40-len(sb):], sb) - - return &Signature{ - Format: k.PublicKey().Type(), - Blob: sig, - }, nil -} - -type ecdsaPublicKey ecdsa.PublicKey - -func (k *ecdsaPublicKey) Type() string { - return "ecdsa-sha2-" + k.nistID() -} - -func (k *ecdsaPublicKey) nistID() string { - switch k.Params().BitSize { - case 256: - return "nistp256" - case 384: - return "nistp384" - case 521: - return "nistp521" - } - panic("ssh: unsupported ecdsa key size") -} - -type ed25519PublicKey ed25519.PublicKey - -func (k ed25519PublicKey) Type() string { - return KeyAlgoED25519 -} - -func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if l := len(w.KeyBytes); l != ed25519.PublicKeySize { - return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - return ed25519PublicKey(w.KeyBytes), w.Rest, nil -} - -func (k ed25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - }{ - KeyAlgoED25519, - []byte(k), - } - return Marshal(&w) -} - -func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - if l := len(k); l != ed25519.PublicKeySize { - return fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) - } - - if ok := ed25519.Verify(ed25519.PublicKey(k), b, sig.Blob); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { - return ed25519.PublicKey(k) -} - -func supportedEllipticCurve(curve elliptic.Curve) bool { - return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() -} - -// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. 
-func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(ecdsa.PublicKey) - - switch w.Curve { - case "nistp256": - key.Curve = elliptic.P256() - case "nistp384": - key.Curve = elliptic.P384() - case "nistp521": - key.Curve = elliptic.P521() - default: - return nil, nil, errors.New("ssh: unsupported curve") - } - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - return (*ecdsaPublicKey)(key), w.Rest, nil -} - -func (k *ecdsaPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. - keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) - // ECDSA publickey struct layout should match the struct used by - // parseECDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - ID string - Key []byte - }{ - k.Type(), - k.nistID(), - keyBytes, - } - - return Marshal(&w) -} - -func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - h := hashFuncs[sig.Format].New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 5656, section 3.1.2, - // The ecdsa_signature_blob value has the following specific encoding: - // mpint r - // mpint s - var ecSig struct { - R *big.Int - S *big.Int - } - - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*ecdsa.PublicKey)(k) -} - -// skFields holds the additional fields present in U2F/FIDO2 signatures. -// See openssh/PROTOCOL.u2f 'SSH U2F Signatures' for details. -type skFields struct { - // Flags contains U2F/FIDO2 flags such as 'user present' - Flags byte - // Counter is a monotonic signature counter which can be - // used to detect concurrent use of a private key, should - // it be extracted from hardware. - Counter uint32 -} - -type skECDSAPublicKey struct { - // application is a URL-like string, typically "ssh:" for SSH. - // see openssh/PROTOCOL.u2f for details. - application string - ecdsa.PublicKey -} - -func (k *skECDSAPublicKey) Type() string { - return KeyAlgoSKECDSA256 -} - -func (k *skECDSAPublicKey) nistID() string { - return "nistp256" -} - -func parseSKECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Application string - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(skECDSAPublicKey) - key.application = w.Application - - if w.Curve != "nistp256" { - return nil, nil, errors.New("ssh: unsupported curve") - } - key.Curve = elliptic.P256() - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - - return key, w.Rest, nil -} - -func (k *skECDSAPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. 
- keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) - w := struct { - Name string - ID string - Key []byte - Application string - }{ - k.Type(), - k.nistID(), - keyBytes, - k.application, - } - - return Marshal(&w) -} - -func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - h := hashFuncs[sig.Format].New() - h.Write([]byte(k.application)) - appDigest := h.Sum(nil) - - h.Reset() - h.Write(data) - dataDigest := h.Sum(nil) - - var ecSig struct { - R *big.Int - S *big.Int - } - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - var skf skFields - if err := Unmarshal(sig.Rest, &skf); err != nil { - return err - } - - blob := struct { - ApplicationDigest []byte `ssh:"rest"` - Flags byte - Counter uint32 - MessageDigest []byte `ssh:"rest"` - }{ - appDigest, - skf.Flags, - skf.Counter, - dataDigest, - } - - original := Marshal(blob) - - h.Reset() - h.Write(original) - digest := h.Sum(nil) - - if ecdsa.Verify((*ecdsa.PublicKey)(&k.PublicKey), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -type skEd25519PublicKey struct { - // application is a URL-like string, typically "ssh:" for SSH. - // see openssh/PROTOCOL.u2f for details. - application string - ed25519.PublicKey -} - -func (k *skEd25519PublicKey) Type() string { - return KeyAlgoSKED25519 -} - -func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Application string - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if l := len(w.KeyBytes); l != ed25519.PublicKeySize { - return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - key := new(skEd25519PublicKey) - key.application = w.Application - key.PublicKey = ed25519.PublicKey(w.KeyBytes) - - return key, w.Rest, nil -} - -func (k *skEd25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - Application string - }{ - KeyAlgoSKED25519, - []byte(k.PublicKey), - k.application, - } - return Marshal(&w) -} - -func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - if l := len(k.PublicKey); l != ed25519.PublicKeySize { - return fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - h := hashFuncs[sig.Format].New() - h.Write([]byte(k.application)) - appDigest := h.Sum(nil) - - h.Reset() - h.Write(data) - dataDigest := h.Sum(nil) - - var edSig struct { - Signature []byte `ssh:"rest"` - } - - if err := Unmarshal(sig.Blob, &edSig); err != nil { - return err - } - - var skf skFields - if err := Unmarshal(sig.Rest, &skf); err != nil { - return err - } - - blob := struct { - ApplicationDigest []byte `ssh:"rest"` - Flags byte - Counter uint32 - MessageDigest []byte `ssh:"rest"` - }{ - appDigest, - skf.Flags, - skf.Counter, - dataDigest, - } - - original := Marshal(blob) - - if ok := ed25519.Verify(k.PublicKey, original, edSig.Signature); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, -// *ecdsa.PrivateKey or any other crypto.Signer and returns a -// corresponding Signer instance. ECDSA keys must use P-256, P-384 or -// P-521. DSA keys must use parameter size L1024N160. 
-func NewSignerFromKey(key interface{}) (Signer, error) { - switch key := key.(type) { - case crypto.Signer: - return NewSignerFromSigner(key) - case *dsa.PrivateKey: - return newDSAPrivateKey(key) - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { - if err := checkDSAParams(&key.PublicKey.Parameters); err != nil { - return nil, err - } - - return &dsaPrivateKey{key}, nil -} - -type wrappedSigner struct { - signer crypto.Signer - pubKey PublicKey -} - -// NewSignerFromSigner takes any crypto.Signer implementation and -// returns a corresponding Signer interface. This can be used, for -// example, with keys kept in hardware modules. -func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { - pubKey, err := NewPublicKey(signer.Public()) - if err != nil { - return nil, err - } - - return &wrappedSigner{signer, pubKey}, nil -} - -func (s *wrappedSigner) PublicKey() PublicKey { - return s.pubKey -} - -func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.SignWithAlgorithm(rand, data, s.pubKey.Type()) -} - -func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - if algorithm == "" { - algorithm = s.pubKey.Type() - } - - supportedAlgos := algorithmsForKeyFormat(s.pubKey.Type()) - if !contains(supportedAlgos, algorithm) { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type()) - } - - hashFunc := hashFuncs[algorithm] - var digest []byte - if hashFunc != 0 { - h := hashFunc.New() - h.Write(data) - digest = h.Sum(nil) - } else { - digest = data - } - - signature, err := s.signer.Sign(rand, digest, hashFunc) - if err != nil { - return nil, err - } - - // crypto.Signer.Sign is expected to return an ASN.1-encoded signature - // for ECDSA and DSA, but that's not the encoding expected by SSH, so - // re-encode. - switch s.pubKey.(type) { - case *ecdsaPublicKey, *dsaPublicKey: - type asn1Signature struct { - R, S *big.Int - } - asn1Sig := new(asn1Signature) - _, err := asn1.Unmarshal(signature, asn1Sig) - if err != nil { - return nil, err - } - - switch s.pubKey.(type) { - case *ecdsaPublicKey: - signature = Marshal(asn1Sig) - - case *dsaPublicKey: - signature = make([]byte, 40) - r := asn1Sig.R.Bytes() - s := asn1Sig.S.Bytes() - copy(signature[20-len(r):20], r) - copy(signature[40-len(s):40], s) - } - } - - return &Signature{ - Format: algorithm, - Blob: signature, - }, nil -} - -// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, -// or ed25519.PublicKey returns a corresponding PublicKey instance. -// ECDSA keys must use P-256, P-384 or P-521. -func NewPublicKey(key interface{}) (PublicKey, error) { - switch key := key.(type) { - case *rsa.PublicKey: - return (*rsaPublicKey)(key), nil - case *ecdsa.PublicKey: - if !supportedEllipticCurve(key.Curve) { - return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported") - } - return (*ecdsaPublicKey)(key), nil - case *dsa.PublicKey: - return (*dsaPublicKey)(key), nil - case ed25519.PublicKey: - if l := len(key); l != ed25519.PublicKeySize { - return nil, fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) - } - return ed25519PublicKey(key), nil - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports -// the same keys as ParseRawPrivateKey. 
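The wrappedSigner path above is what makes hardware-backed keys usable over SSH; a short sketch, assuming the golang.org/x/crypto/ssh import path, with a freshly generated ECDSA key standing in for a crypto.Signer held in an HSM.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Any crypto.Signer works here; a P-256 key stands in for one.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.NewSignerFromSigner(priv)
	if err != nil {
		log.Fatal(err)
	}

	// Sign some data and verify it against the wrapped public key.
	sig, err := signer.Sign(rand.Reader, []byte("payload"))
	if err != nil {
		log.Fatal(err)
	}
	if err := signer.PublicKey().Verify([]byte("payload"), sig); err != nil {
		log.Fatal(err)
	}
	fmt.Println(signer.PublicKey().Type()) // ecdsa-sha2-nistp256
}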
If the private key is encrypted, it -// will return a PassphraseMissingError. -func ParsePrivateKey(pemBytes []byte) (Signer, error) { - key, err := ParseRawPrivateKey(pemBytes) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private -// key and passphrase. It supports the same keys as -// ParseRawPrivateKeyWithPassphrase. -func ParsePrivateKeyWithPassphrase(pemBytes, passphrase []byte) (Signer, error) { - key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// encryptedBlock tells whether a private key is -// encrypted by examining its Proc-Type header -// for a mention of ENCRYPTED -// according to RFC 1421 Section 4.6.1.1. -func encryptedBlock(block *pem.Block) bool { - return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") -} - -// A PassphraseMissingError indicates that parsing this private key requires a -// passphrase. Use ParsePrivateKeyWithPassphrase. -type PassphraseMissingError struct { - // PublicKey will be set if the private key format includes an unencrypted - // public key along with the encrypted private key. - PublicKey PublicKey -} - -func (*PassphraseMissingError) Error() string { - return "ssh: this private key is passphrase protected" -} - -// ParseRawPrivateKey returns a private key from a PEM encoded private key. It -// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. If the -// private key is encrypted, it will return a PassphraseMissingError. -func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if encryptedBlock(block) { - return nil, &PassphraseMissingError{} - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(block.Bytes) - // RFC5208 - https://tools.ietf.org/html/rfc5208 - case "PRIVATE KEY": - return x509.ParsePKCS8PrivateKey(block.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(block.Bytes) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(block.Bytes) - case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(block.Bytes, unencryptedOpenSSHKey) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with -// passphrase from a PEM encoded private key. If the passphrase is wrong, it -// will return x509.IncorrectPasswordError. 
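A sketch of the intended calling pattern for the two parse functions above, assuming the golang.org/x/crypto/ssh import path; the loadSigner helper is illustrative, not part of the package.

package keyutil

import (
	"errors"

	"golang.org/x/crypto/ssh"
)

// loadSigner parses a PEM-encoded private key, retrying with the supplied
// passphrase when the key turns out to be encrypted.
func loadSigner(pemBytes, passphrase []byte) (ssh.Signer, error) {
	signer, err := ssh.ParsePrivateKey(pemBytes)
	if err == nil {
		return signer, nil
	}
	var missing *ssh.PassphraseMissingError
	if errors.As(err, &missing) {
		// Encrypted key: fall back to the passphrase-aware parser.
		return ssh.ParsePrivateKeyWithPassphrase(pemBytes, passphrase)
	}
	return nil, err
}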
-func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if block.Type == "OPENSSH PRIVATE KEY" { - return parseOpenSSHPrivateKey(block.Bytes, passphraseProtectedOpenSSHKey(passphrase)) - } - - if !encryptedBlock(block) || !x509.IsEncryptedPEMBlock(block) { - return nil, errors.New("ssh: not an encrypted key") - } - - buf, err := x509.DecryptPEMBlock(block, passphrase) - if err != nil { - if err == x509.IncorrectPasswordError { - return nil, err - } - return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(buf) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(buf) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(buf) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as -// specified by the OpenSSL DSA man page. -func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { - var k struct { - Version int - P *big.Int - Q *big.Int - G *big.Int - Pub *big.Int - Priv *big.Int - } - rest, err := asn1.Unmarshal(der, &k) - if err != nil { - return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) - } - if len(rest) > 0 { - return nil, errors.New("ssh: garbage after DSA key") - } - - return &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, - Q: k.Q, - G: k.G, - }, - Y: k.Pub, - }, - X: k.Priv, - }, nil -} - -func unencryptedOpenSSHKey(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { - if kdfName != "none" || cipherName != "none" { - return nil, &PassphraseMissingError{} - } - if kdfOpts != "" { - return nil, errors.New("ssh: invalid openssh private key") - } - return privKeyBlock, nil -} - -func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc { - return func(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { - if kdfName == "none" || cipherName == "none" { - return nil, errors.New("ssh: key is not password protected") - } - if kdfName != "bcrypt" { - return nil, fmt.Errorf("ssh: unknown KDF %q, only supports %q", kdfName, "bcrypt") - } - - var opts struct { - Salt string - Rounds uint32 - } - if err := Unmarshal([]byte(kdfOpts), &opts); err != nil { - return nil, err - } - - k, err := bcrypt_pbkdf.Key(passphrase, []byte(opts.Salt), int(opts.Rounds), 32+16) - if err != nil { - return nil, err - } - key, iv := k[:32], k[32:] - - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - switch cipherName { - case "aes256-ctr": - ctr := cipher.NewCTR(c, iv) - ctr.XORKeyStream(privKeyBlock, privKeyBlock) - case "aes256-cbc": - if len(privKeyBlock)%c.BlockSize() != 0 { - return nil, fmt.Errorf("ssh: invalid encrypted private key length, not a multiple of the block size") - } - cbc := cipher.NewCBCDecrypter(c, iv) - cbc.CryptBlocks(privKeyBlock, privKeyBlock) - default: - return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q or %q", cipherName, "aes256-ctr", "aes256-cbc") - } - - return privKeyBlock, nil - } -} - -type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error) - -// parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt -// function to unwrap the encrypted portion. 
unencryptedOpenSSHKey can be used -// as the decrypt function to parse an unencrypted private key. See -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key. -func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) { - const magic = "openssh-key-v1\x00" - if len(key) < len(magic) || string(key[:len(magic)]) != magic { - return nil, errors.New("ssh: invalid openssh private key format") - } - remaining := key[len(magic):] - - var w struct { - CipherName string - KdfName string - KdfOpts string - NumKeys uint32 - PubKey []byte - PrivKeyBlock []byte - } - - if err := Unmarshal(remaining, &w); err != nil { - return nil, err - } - if w.NumKeys != 1 { - // We only support single key files, and so does OpenSSH. - // https://github.com/openssh/openssh-portable/blob/4103a3ec7/sshkey.c#L4171 - return nil, errors.New("ssh: multi-key files are not supported") - } - - privKeyBlock, err := decrypt(w.CipherName, w.KdfName, w.KdfOpts, w.PrivKeyBlock) - if err != nil { - if err, ok := err.(*PassphraseMissingError); ok { - pub, errPub := ParsePublicKey(w.PubKey) - if errPub != nil { - return nil, fmt.Errorf("ssh: failed to parse embedded public key: %v", errPub) - } - err.PublicKey = pub - } - return nil, err - } - - pk1 := struct { - Check1 uint32 - Check2 uint32 - Keytype string - Rest []byte `ssh:"rest"` - }{} - - if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 { - if w.CipherName != "none" { - return nil, x509.IncorrectPasswordError - } - return nil, errors.New("ssh: malformed OpenSSH key") - } - - switch pk1.Keytype { - case KeyAlgoRSA: - // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 - key := struct { - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int - P *big.Int - Q *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - pk := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: key.N, - E: int(key.E.Int64()), - }, - D: key.D, - Primes: []*big.Int{key.P, key.Q}, - } - - if err := pk.Validate(); err != nil { - return nil, err - } - - pk.Precompute() - - return pk, nil - case KeyAlgoED25519: - key := struct { - Pub []byte - Priv []byte - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if len(key.Priv) != ed25519.PrivateKeySize { - return nil, errors.New("ssh: private key unexpected length") - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) - copy(pk, key.Priv) - return &pk, nil - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - key := struct { - Curve string - Pub []byte - D *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - var curve elliptic.Curve - switch key.Curve { - case "nistp256": - curve = elliptic.P256() - case "nistp384": - curve = elliptic.P384() - case "nistp521": - curve = elliptic.P521() - default: - return nil, errors.New("ssh: unhandled elliptic curve: " + key.Curve) - } - - X, Y := elliptic.Unmarshal(curve, key.Pub) - if X == nil || Y == nil { - return nil, errors.New("ssh: failed to unmarshal public key") - } - - if key.D.Cmp(curve.Params().N) >= 0 
{ - return nil, errors.New("ssh: scalar is out of range") - } - - x, y := curve.ScalarBaseMult(key.D.Bytes()) - if x.Cmp(X) != 0 || y.Cmp(Y) != 0 { - return nil, errors.New("ssh: public key does not match private key") - } - - return &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: curve, - X: X, - Y: Y, - }, - D: key.D, - }, nil - default: - return nil, errors.New("ssh: unhandled key type") - } -} - -func checkOpenSSHKeyPadding(pad []byte) error { - for i, b := range pad { - if int(b) != i+1 { - return errors.New("ssh: padding not as expected") - } - } - return nil -} - -// FingerprintLegacyMD5 returns the user presentation of the key's -// fingerprint as described by RFC 4716 section 4. -func FingerprintLegacyMD5(pubKey PublicKey) string { - md5sum := md5.Sum(pubKey.Marshal()) - hexarray := make([]string, len(md5sum)) - for i, c := range md5sum { - hexarray[i] = hex.EncodeToString([]byte{c}) - } - return strings.Join(hexarray, ":") -} - -// FingerprintSHA256 returns the user presentation of the key's -// fingerprint as unpadded base64 encoded sha256 hash. -// This format was introduced from OpenSSH 6.8. -// https://www.openssh.com/txt/release-6.8 -// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) -func FingerprintSHA256(pubKey PublicKey) string { - sha256sum := sha256.Sum256(pubKey.Marshal()) - hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) - return "SHA256:" + hash -} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go deleted file mode 100644 index c07a0628..00000000 --- a/vendor/golang.org/x/crypto/ssh/mac.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Message authentication support - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/sha256" - "hash" -) - -type macMode struct { - keySize int - etm bool - new func(key []byte) hash.Hash -} - -// truncatingMAC wraps around a hash.Hash and truncates the output digest to -// a given size. -type truncatingMAC struct { - length int - hmac hash.Hash -} - -func (t truncatingMAC) Write(data []byte) (int, error) { - return t.hmac.Write(data) -} - -func (t truncatingMAC) Sum(in []byte) []byte { - out := t.hmac.Sum(in) - return out[:len(in)+t.length] -} - -func (t truncatingMAC) Reset() { - t.hmac.Reset() -} - -func (t truncatingMAC) Size() int { - return t.length -} - -func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } - -var macModes = map[string]*macMode{ - "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha1": {20, false, func(key []byte) hash.Hash { - return hmac.New(sha1.New, key) - }}, - "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { - return truncatingMAC{12, hmac.New(sha1.New, key)} - }}, -} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go deleted file mode 100644 index 922032d9..00000000 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ /dev/null @@ -1,877 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
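The truncatingMAC wrapper above exists for "hmac-sha1-96"; the same truncation, restated as a self-contained sketch using only the standard library.

package macutil

import (
	"crypto/hmac"
	"crypto/sha1"
)

// hmacSHA1_96 computes HMAC-SHA1 and keeps only the first 12 bytes
// (96 bits), the truncation "hmac-sha1-96" applies via truncatingMAC.
func hmacSHA1_96(key, data []byte) []byte {
	h := hmac.New(sha1.New, key)
	h.Write(data)
	return h.Sum(nil)[:12]
}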
- -package ssh - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - "reflect" - "strconv" - "strings" -) - -// These are SSH message type numbers. They are scattered around several -// documents but many were taken from [SSH-PARAMETERS]. -const ( - msgIgnore = 2 - msgUnimplemented = 3 - msgDebug = 4 - msgNewKeys = 21 -) - -// SSH messages: -// -// These structures mirror the wire format of the corresponding SSH messages. -// They are marshaled using reflection with the marshal and unmarshal functions -// in this file. The only wrinkle is that a final member of type []byte with a -// ssh tag of "rest" receives the remainder of a packet when unmarshaling. - -// See RFC 4253, section 11.1. -const msgDisconnect = 1 - -// disconnectMsg is the message that signals a disconnect. It is also -// the error type returned from mux.Wait() -type disconnectMsg struct { - Reason uint32 `sshtype:"1"` - Message string - Language string -} - -func (d *disconnectMsg) Error() string { - return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) -} - -// See RFC 4253, section 7.1. -const msgKexInit = 20 - -type kexInitMsg struct { - Cookie [16]byte `sshtype:"20"` - KexAlgos []string - ServerHostKeyAlgos []string - CiphersClientServer []string - CiphersServerClient []string - MACsClientServer []string - MACsServerClient []string - CompressionClientServer []string - CompressionServerClient []string - LanguagesClientServer []string - LanguagesServerClient []string - FirstKexFollows bool - Reserved uint32 -} - -// See RFC 4253, section 8. - -// Diffie-Hellman -const msgKexDHInit = 30 - -type kexDHInitMsg struct { - X *big.Int `sshtype:"30"` -} - -const msgKexECDHInit = 30 - -type kexECDHInitMsg struct { - ClientPubKey []byte `sshtype:"30"` -} - -const msgKexECDHReply = 31 - -type kexECDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - EphemeralPubKey []byte - Signature []byte -} - -const msgKexDHReply = 31 - -type kexDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - Y *big.Int - Signature []byte -} - -// See RFC 4419, section 5. -const msgKexDHGexGroup = 31 - -type kexDHGexGroupMsg struct { - P *big.Int `sshtype:"31"` - G *big.Int -} - -const msgKexDHGexInit = 32 - -type kexDHGexInitMsg struct { - X *big.Int `sshtype:"32"` -} - -const msgKexDHGexReply = 33 - -type kexDHGexReplyMsg struct { - HostKey []byte `sshtype:"33"` - Y *big.Int - Signature []byte -} - -const msgKexDHGexRequest = 34 - -type kexDHGexRequestMsg struct { - MinBits uint32 `sshtype:"34"` - PreferedBits uint32 - MaxBits uint32 -} - -// See RFC 4253, section 10. -const msgServiceRequest = 5 - -type serviceRequestMsg struct { - Service string `sshtype:"5"` -} - -// See RFC 4253, section 10. -const msgServiceAccept = 6 - -type serviceAcceptMsg struct { - Service string `sshtype:"6"` -} - -// See RFC 8308, section 2.3 -const msgExtInfo = 7 - -type extInfoMsg struct { - NumExtensions uint32 `sshtype:"7"` - Payload []byte `ssh:"rest"` -} - -// See RFC 4252, section 5. -const msgUserAuthRequest = 50 - -type userAuthRequestMsg struct { - User string `sshtype:"50"` - Service string - Method string - Payload []byte `ssh:"rest"` -} - -// Used for debug printouts of packets. 
-type userAuthSuccessMsg struct { -} - -// See RFC 4252, section 5.1 -const msgUserAuthFailure = 51 - -type userAuthFailureMsg struct { - Methods []string `sshtype:"51"` - PartialSuccess bool -} - -// See RFC 4252, section 5.1 -const msgUserAuthSuccess = 52 - -// See RFC 4252, section 5.4 -const msgUserAuthBanner = 53 - -type userAuthBannerMsg struct { - Message string `sshtype:"53"` - // unused, but required to allow message parsing - Language string -} - -// See RFC 4256, section 3.2 -const msgUserAuthInfoRequest = 60 -const msgUserAuthInfoResponse = 61 - -type userAuthInfoRequestMsg struct { - Name string `sshtype:"60"` - Instruction string - Language string - NumPrompts uint32 - Prompts []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpen = 90 - -type channelOpenMsg struct { - ChanType string `sshtype:"90"` - PeersID uint32 - PeersWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -const msgChannelExtendedData = 95 -const msgChannelData = 94 - -// Used for debug print outs of packets. -type channelDataMsg struct { - PeersID uint32 `sshtype:"94"` - Length uint32 - Rest []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenConfirm = 91 - -type channelOpenConfirmMsg struct { - PeersID uint32 `sshtype:"91"` - MyID uint32 - MyWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenFailure = 92 - -type channelOpenFailureMsg struct { - PeersID uint32 `sshtype:"92"` - Reason RejectionReason - Message string - Language string -} - -const msgChannelRequest = 98 - -type channelRequestMsg struct { - PeersID uint32 `sshtype:"98"` - Request string - WantReply bool - RequestSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.4. -const msgChannelSuccess = 99 - -type channelRequestSuccessMsg struct { - PeersID uint32 `sshtype:"99"` -} - -// See RFC 4254, section 5.4. 
-const msgChannelFailure = 100 - -type channelRequestFailureMsg struct { - PeersID uint32 `sshtype:"100"` -} - -// See RFC 4254, section 5.3 -const msgChannelClose = 97 - -type channelCloseMsg struct { - PeersID uint32 `sshtype:"97"` -} - -// See RFC 4254, section 5.3 -const msgChannelEOF = 96 - -type channelEOFMsg struct { - PeersID uint32 `sshtype:"96"` -} - -// See RFC 4254, section 4 -const msgGlobalRequest = 80 - -type globalRequestMsg struct { - Type string `sshtype:"80"` - WantReply bool - Data []byte `ssh:"rest"` -} - -// See RFC 4254, section 4 -const msgRequestSuccess = 81 - -type globalRequestSuccessMsg struct { - Data []byte `ssh:"rest" sshtype:"81"` -} - -// See RFC 4254, section 4 -const msgRequestFailure = 82 - -type globalRequestFailureMsg struct { - Data []byte `ssh:"rest" sshtype:"82"` -} - -// See RFC 4254, section 5.2 -const msgChannelWindowAdjust = 93 - -type windowAdjustMsg struct { - PeersID uint32 `sshtype:"93"` - AdditionalBytes uint32 -} - -// See RFC 4252, section 7 -const msgUserAuthPubKeyOk = 60 - -type userAuthPubKeyOkMsg struct { - Algo string `sshtype:"60"` - PubKey []byte -} - -// See RFC 4462, section 3 -const msgUserAuthGSSAPIResponse = 60 - -type userAuthGSSAPIResponse struct { - SupportMech []byte `sshtype:"60"` -} - -const msgUserAuthGSSAPIToken = 61 - -type userAuthGSSAPIToken struct { - Token []byte `sshtype:"61"` -} - -const msgUserAuthGSSAPIMIC = 66 - -type userAuthGSSAPIMIC struct { - MIC []byte `sshtype:"66"` -} - -// See RFC 4462, section 3.9 -const msgUserAuthGSSAPIErrTok = 64 - -type userAuthGSSAPIErrTok struct { - ErrorToken []byte `sshtype:"64"` -} - -// See RFC 4462, section 3.8 -const msgUserAuthGSSAPIError = 65 - -type userAuthGSSAPIError struct { - MajorStatus uint32 `sshtype:"65"` - MinorStatus uint32 - Message string - LanguageTag string -} - -// typeTags returns the possible type bytes for the given reflect.Type, which -// should be a struct. The possible values are separated by a '|' character. -func typeTags(structType reflect.Type) (tags []byte) { - tagStr := structType.Field(0).Tag.Get("sshtype") - - for _, tag := range strings.Split(tagStr, "|") { - i, err := strconv.Atoi(tag) - if err == nil { - tags = append(tags, byte(i)) - } - } - - return tags -} - -func fieldError(t reflect.Type, field int, problem string) error { - if problem != "" { - problem = ": " + problem - } - return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) -} - -var errShortRead = errors.New("ssh: short read") - -// Unmarshal parses data in SSH wire format into a structure. The out -// argument should be a pointer to struct. If the first member of the -// struct has the "sshtype" tag set to a '|'-separated set of numbers -// in decimal, the packet must start with one of those numbers. In -// case of error, Unmarshal returns a ParseError or -// UnexpectedMessageError. 
-func Unmarshal(data []byte, out interface{}) error { - v := reflect.ValueOf(out).Elem() - structType := v.Type() - expectedTypes := typeTags(structType) - - var expectedType byte - if len(expectedTypes) > 0 { - expectedType = expectedTypes[0] - } - - if len(data) == 0 { - return parseError(expectedType) - } - - if len(expectedTypes) > 0 { - goodType := false - for _, e := range expectedTypes { - if e > 0 && data[0] == e { - goodType = true - break - } - } - if !goodType { - return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) - } - data = data[1:] - } - - var ok bool - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - t := field.Type() - switch t.Kind() { - case reflect.Bool: - if len(data) < 1 { - return errShortRead - } - field.SetBool(data[0] != 0) - data = data[1:] - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - return fieldError(structType, i, "array of unsupported type") - } - if len(data) < t.Len() { - return errShortRead - } - for j, n := 0, t.Len(); j < n; j++ { - field.Index(j).Set(reflect.ValueOf(data[j])) - } - data = data[t.Len():] - case reflect.Uint64: - var u64 uint64 - if u64, data, ok = parseUint64(data); !ok { - return errShortRead - } - field.SetUint(u64) - case reflect.Uint32: - var u32 uint32 - if u32, data, ok = parseUint32(data); !ok { - return errShortRead - } - field.SetUint(uint64(u32)) - case reflect.Uint8: - if len(data) < 1 { - return errShortRead - } - field.SetUint(uint64(data[0])) - data = data[1:] - case reflect.String: - var s []byte - if s, data, ok = parseString(data); !ok { - return fieldError(structType, i, "") - } - field.SetString(string(s)) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if structType.Field(i).Tag.Get("ssh") == "rest" { - field.Set(reflect.ValueOf(data)) - data = nil - } else { - var s []byte - if s, data, ok = parseString(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(s)) - } - case reflect.String: - var nl []string - if nl, data, ok = parseNameList(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(nl)) - default: - return fieldError(structType, i, "slice of unsupported type") - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - if n, data, ok = parseInt(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(n)) - } else { - return fieldError(structType, i, "pointer to unsupported type") - } - default: - return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) - } - } - - if len(data) != 0 { - return parseError(expectedType) - } - - return nil -} - -// Marshal serializes the message in msg to SSH wire format. The msg -// argument should be a struct or pointer to struct. If the first -// member has the "sshtype" tag set to a number in decimal, that -// number is prepended to the result. If the last of member has the -// "ssh" tag set to "rest", its contents are appended to the output. 
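A small round trip through Marshal and Unmarshal, assuming the golang.org/x/crypto/ssh import path; the ignoreMsg struct is illustrative and mirrors SSH_MSG_IGNORE (message number 2) from RFC 4253.

package wire

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

// ignoreMsg mirrors the SSH_MSG_IGNORE wire layout: the sshtype tag makes
// Marshal prepend the message number 2 and Unmarshal require it.
type ignoreMsg struct {
	Data string `sshtype:"2"`
}

func roundTrip() error {
	packet := ssh.Marshal(&ignoreMsg{Data: "padding"})
	// packet[0] is 2; the rest is a length-prefixed string.
	var out ignoreMsg
	if err := ssh.Unmarshal(packet, &out); err != nil {
		return err
	}
	fmt.Println(out.Data) // "padding"
	return nil
}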
-func Marshal(msg interface{}) []byte { - out := make([]byte, 0, 64) - return marshalStruct(out, msg) -} - -func marshalStruct(out []byte, msg interface{}) []byte { - v := reflect.Indirect(reflect.ValueOf(msg)) - msgTypes := typeTags(v.Type()) - if len(msgTypes) > 0 { - out = append(out, msgTypes[0]) - } - - for i, n := 0, v.NumField(); i < n; i++ { - field := v.Field(i) - switch t := field.Type(); t.Kind() { - case reflect.Bool: - var v uint8 - if field.Bool() { - v = 1 - } - out = append(out, v) - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) - } - for j, l := 0, t.Len(); j < l; j++ { - out = append(out, uint8(field.Index(j).Uint())) - } - case reflect.Uint32: - out = appendU32(out, uint32(field.Uint())) - case reflect.Uint64: - out = appendU64(out, uint64(field.Uint())) - case reflect.Uint8: - out = append(out, uint8(field.Uint())) - case reflect.String: - s := field.String() - out = appendInt(out, len(s)) - out = append(out, s...) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if v.Type().Field(i).Tag.Get("ssh") != "rest" { - out = appendInt(out, field.Len()) - } - out = append(out, field.Bytes()...) - case reflect.String: - offset := len(out) - out = appendU32(out, 0) - if n := field.Len(); n > 0 { - for j := 0; j < n; j++ { - f := field.Index(j) - if j != 0 { - out = append(out, ',') - } - out = append(out, f.String()...) - } - // overwrite length value - binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) - } - default: - panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - nValue := reflect.ValueOf(&n) - nValue.Elem().Set(field) - needed := intLength(n) - oldLength := len(out) - - if cap(out)-len(out) < needed { - newOut := make([]byte, len(out), 2*(len(out)+needed)) - copy(newOut, out) - out = newOut - } - out = out[:oldLength+needed] - marshalInt(out[oldLength:], n) - } else { - panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) - } - } - } - - return out -} - -var bigOne = big.NewInt(1) - -func parseString(in []byte) (out, rest []byte, ok bool) { - if len(in) < 4 { - return - } - length := binary.BigEndian.Uint32(in) - in = in[4:] - if uint32(len(in)) < length { - return - } - out = in[:length] - rest = in[length:] - ok = true - return -} - -var ( - comma = []byte{','} - emptyNameList = []string{} -) - -func parseNameList(in []byte) (out []string, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - if len(contents) == 0 { - out = emptyNameList - return - } - parts := bytes.Split(contents, comma) - out = make([]string, len(parts)) - for i, part := range parts { - out[i] = string(part) - } - return -} - -func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - out = new(big.Int) - - if len(contents) > 0 && contents[0]&0x80 == 0x80 { - // This is a negative number - notBytes := make([]byte, len(contents)) - for i := range notBytes { - notBytes[i] = ^contents[i] - } - out.SetBytes(notBytes) - out.Add(out, bigOne) - out.Neg(out) - } else { - // Positive number - out.SetBytes(contents) - } - ok = true - return -} - -func parseUint32(in []byte) (uint32, []byte, bool) { - if len(in) < 4 { - return 0, nil, false - } - return binary.BigEndian.Uint32(in), in[4:], true -} - -func parseUint64(in []byte) (uint64, []byte, 
bool) { - if len(in) < 8 { - return 0, nil, false - } - return binary.BigEndian.Uint64(in), in[8:], true -} - -func intLength(n *big.Int) int { - length := 4 /* length bytes */ - if n.Sign() < 0 { - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bitLen := nMinus1.BitLen() - if bitLen%8 == 0 { - // The number will need 0xff padding - length++ - } - length += (bitLen + 7) / 8 - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bitLen := n.BitLen() - if bitLen%8 == 0 { - // The number will need 0x00 padding - length++ - } - length += (bitLen + 7) / 8 - } - - return length -} - -func marshalUint32(to []byte, n uint32) []byte { - binary.BigEndian.PutUint32(to, n) - return to[4:] -} - -func marshalUint64(to []byte, n uint64) []byte { - binary.BigEndian.PutUint64(to, n) - return to[8:] -} - -func marshalInt(to []byte, n *big.Int) []byte { - lengthBytes := to - to = to[4:] - length := 0 - - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement - // form. So we'll subtract 1 and invert. If the - // most-significant-bit isn't set then we'll need to pad the - // beginning with 0xff in order to keep the number negative. - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - to[0] = 0xff - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bytes := n.Bytes() - if len(bytes) > 0 && bytes[0]&0x80 != 0 { - // We'll have to pad this with a 0x00 in order to - // stop it looking like a negative number. - to[0] = 0 - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } - - lengthBytes[0] = byte(length >> 24) - lengthBytes[1] = byte(length >> 16) - lengthBytes[2] = byte(length >> 8) - lengthBytes[3] = byte(length) - return to -} - -func writeInt(w io.Writer, n *big.Int) { - length := intLength(n) - buf := make([]byte, length) - marshalInt(buf, n) - w.Write(buf) -} - -func writeString(w io.Writer, s []byte) { - var lengthBytes [4]byte - lengthBytes[0] = byte(len(s) >> 24) - lengthBytes[1] = byte(len(s) >> 16) - lengthBytes[2] = byte(len(s) >> 8) - lengthBytes[3] = byte(len(s)) - w.Write(lengthBytes[:]) - w.Write(s) -} - -func stringLength(n int) int { - return 4 + n -} - -func marshalString(to []byte, s []byte) []byte { - to[0] = byte(len(s) >> 24) - to[1] = byte(len(s) >> 16) - to[2] = byte(len(s) >> 8) - to[3] = byte(len(s)) - to = to[4:] - copy(to, s) - return to[len(s):] -} - -var bigIntType = reflect.TypeOf((*big.Int)(nil)) - -// Decode a packet into its corresponding message. 
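The sign-padding rules implemented by intLength and marshalInt above, restated as a standalone mpint encoder; a sketch under the RFC 4251 definition, not the package's own code.

package wire

import (
	"encoding/binary"
	"math/big"
)

// mpint encodes n in the SSH "mpint" form: a 4-byte big-endian length,
// then a minimal two's-complement body.
func mpint(n *big.Int) []byte {
	var body []byte
	switch {
	case n.Sign() > 0:
		body = n.Bytes()
		if body[0]&0x80 != 0 {
			body = append([]byte{0x00}, body...) // keep the sign bit clear
		}
	case n.Sign() < 0:
		// Two's complement: invert the bytes of |n|-1.
		m := new(big.Int).Abs(n)
		m.Sub(m, big.NewInt(1))
		body = m.Bytes()
		for i := range body {
			body[i] ^= 0xff
		}
		if len(body) == 0 || body[0]&0x80 == 0 {
			body = append([]byte{0xff}, body...) // keep the sign bit set
		}
	}
	// Zero encodes as a zero-length body.
	out := make([]byte, 4+len(body))
	binary.BigEndian.PutUint32(out, uint32(len(body)))
	copy(out[4:], body)
	return out
}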
-func decode(packet []byte) (interface{}, error) { - var msg interface{} - switch packet[0] { - case msgDisconnect: - msg = new(disconnectMsg) - case msgServiceRequest: - msg = new(serviceRequestMsg) - case msgServiceAccept: - msg = new(serviceAcceptMsg) - case msgExtInfo: - msg = new(extInfoMsg) - case msgKexInit: - msg = new(kexInitMsg) - case msgKexDHInit: - msg = new(kexDHInitMsg) - case msgKexDHReply: - msg = new(kexDHReplyMsg) - case msgUserAuthRequest: - msg = new(userAuthRequestMsg) - case msgUserAuthSuccess: - return new(userAuthSuccessMsg), nil - case msgUserAuthFailure: - msg = new(userAuthFailureMsg) - case msgUserAuthPubKeyOk: - msg = new(userAuthPubKeyOkMsg) - case msgGlobalRequest: - msg = new(globalRequestMsg) - case msgRequestSuccess: - msg = new(globalRequestSuccessMsg) - case msgRequestFailure: - msg = new(globalRequestFailureMsg) - case msgChannelOpen: - msg = new(channelOpenMsg) - case msgChannelData: - msg = new(channelDataMsg) - case msgChannelOpenConfirm: - msg = new(channelOpenConfirmMsg) - case msgChannelOpenFailure: - msg = new(channelOpenFailureMsg) - case msgChannelWindowAdjust: - msg = new(windowAdjustMsg) - case msgChannelEOF: - msg = new(channelEOFMsg) - case msgChannelClose: - msg = new(channelCloseMsg) - case msgChannelRequest: - msg = new(channelRequestMsg) - case msgChannelSuccess: - msg = new(channelRequestSuccessMsg) - case msgChannelFailure: - msg = new(channelRequestFailureMsg) - case msgUserAuthGSSAPIToken: - msg = new(userAuthGSSAPIToken) - case msgUserAuthGSSAPIMIC: - msg = new(userAuthGSSAPIMIC) - case msgUserAuthGSSAPIErrTok: - msg = new(userAuthGSSAPIErrTok) - case msgUserAuthGSSAPIError: - msg = new(userAuthGSSAPIError) - default: - return nil, unexpectedMessageError(0, packet[0]) - } - if err := Unmarshal(packet, msg); err != nil { - return nil, err - } - return msg, nil -} - -var packetTypeNames = map[byte]string{ - msgDisconnect: "disconnectMsg", - msgServiceRequest: "serviceRequestMsg", - msgServiceAccept: "serviceAcceptMsg", - msgExtInfo: "extInfoMsg", - msgKexInit: "kexInitMsg", - msgKexDHInit: "kexDHInitMsg", - msgKexDHReply: "kexDHReplyMsg", - msgUserAuthRequest: "userAuthRequestMsg", - msgUserAuthSuccess: "userAuthSuccessMsg", - msgUserAuthFailure: "userAuthFailureMsg", - msgUserAuthPubKeyOk: "userAuthPubKeyOkMsg", - msgGlobalRequest: "globalRequestMsg", - msgRequestSuccess: "globalRequestSuccessMsg", - msgRequestFailure: "globalRequestFailureMsg", - msgChannelOpen: "channelOpenMsg", - msgChannelData: "channelDataMsg", - msgChannelOpenConfirm: "channelOpenConfirmMsg", - msgChannelOpenFailure: "channelOpenFailureMsg", - msgChannelWindowAdjust: "windowAdjustMsg", - msgChannelEOF: "channelEOFMsg", - msgChannelClose: "channelCloseMsg", - msgChannelRequest: "channelRequestMsg", - msgChannelSuccess: "channelRequestSuccessMsg", - msgChannelFailure: "channelRequestFailureMsg", -} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go deleted file mode 100644 index 9654c018..00000000 --- a/vendor/golang.org/x/crypto/ssh/mux.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "fmt" - "io" - "log" - "sync" - "sync/atomic" -) - -// debugMux, if set, causes messages in the connection protocol to be -// logged. -const debugMux = false - -// chanList is a thread safe channel list. 
-type chanList struct { - // protects concurrent access to chans - sync.Mutex - - // chans are indexed by the local id of the channel, which the - // other side should send in the PeersId field. - chans []*channel - - // This is a debugging aid: it offsets all IDs by this - // amount. This helps distinguish otherwise identical - // server/client muxes - offset uint32 -} - -// Assigns a channel ID to the given channel. -func (c *chanList) add(ch *channel) uint32 { - c.Lock() - defer c.Unlock() - for i := range c.chans { - if c.chans[i] == nil { - c.chans[i] = ch - return uint32(i) + c.offset - } - } - c.chans = append(c.chans, ch) - return uint32(len(c.chans)-1) + c.offset -} - -// getChan returns the channel for the given ID. -func (c *chanList) getChan(id uint32) *channel { - id -= c.offset - - c.Lock() - defer c.Unlock() - if id < uint32(len(c.chans)) { - return c.chans[id] - } - return nil -} - -func (c *chanList) remove(id uint32) { - id -= c.offset - c.Lock() - if id < uint32(len(c.chans)) { - c.chans[id] = nil - } - c.Unlock() -} - -// dropAll forgets all channels it knows, returning them in a slice. -func (c *chanList) dropAll() []*channel { - c.Lock() - defer c.Unlock() - var r []*channel - - for _, ch := range c.chans { - if ch == nil { - continue - } - r = append(r, ch) - } - c.chans = nil - return r -} - -// mux represents the state for the SSH connection protocol, which -// multiplexes many channels onto a single packet transport. -type mux struct { - conn packetConn - chanList chanList - - incomingChannels chan NewChannel - - globalSentMu sync.Mutex - globalResponses chan interface{} - incomingRequests chan *Request - - errCond *sync.Cond - err error -} - -// When debugging, each new chanList instantiation has a different -// offset. -var globalOff uint32 - -func (m *mux) Wait() error { - m.errCond.L.Lock() - defer m.errCond.L.Unlock() - for m.err == nil { - m.errCond.Wait() - } - return m.err -} - -// newMux returns a mux that runs over the given connection. -func newMux(p packetConn) *mux { - m := &mux{ - conn: p, - incomingChannels: make(chan NewChannel, chanSize), - globalResponses: make(chan interface{}, 1), - incomingRequests: make(chan *Request, chanSize), - errCond: newCond(), - } - if debugMux { - m.chanList.offset = atomic.AddUint32(&globalOff, 1) - } - - go m.loop() - return m -} - -func (m *mux) sendMessage(msg interface{}) error { - p := Marshal(msg) - if debugMux { - log.Printf("send global(%d): %#v", m.chanList.offset, msg) - } - return m.conn.writePacket(p) -} - -func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { - if wantReply { - m.globalSentMu.Lock() - defer m.globalSentMu.Unlock() - } - - if err := m.sendMessage(globalRequestMsg{ - Type: name, - WantReply: wantReply, - Data: payload, - }); err != nil { - return false, nil, err - } - - if !wantReply { - return false, nil, nil - } - - msg, ok := <-m.globalResponses - if !ok { - return false, nil, io.EOF - } - switch msg := msg.(type) { - case *globalRequestFailureMsg: - return false, msg.Data, nil - case *globalRequestSuccessMsg: - return true, msg.Data, nil - default: - return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) - } -} - -// ackRequest must be called after processing a global request that -// has WantReply set. 
-func (m *mux) ackRequest(ok bool, data []byte) error { - if ok { - return m.sendMessage(globalRequestSuccessMsg{Data: data}) - } - return m.sendMessage(globalRequestFailureMsg{Data: data}) -} - -func (m *mux) Close() error { - return m.conn.Close() -} - -// loop runs the connection machine. It will process packets until an -// error is encountered. To synchronize on loop exit, use mux.Wait. -func (m *mux) loop() { - var err error - for err == nil { - err = m.onePacket() - } - - for _, ch := range m.chanList.dropAll() { - ch.close() - } - - close(m.incomingChannels) - close(m.incomingRequests) - close(m.globalResponses) - - m.conn.Close() - - m.errCond.L.Lock() - m.err = err - m.errCond.Broadcast() - m.errCond.L.Unlock() - - if debugMux { - log.Println("loop exit", err) - } -} - -// onePacket reads and processes one packet. -func (m *mux) onePacket() error { - packet, err := m.conn.readPacket() - if err != nil { - return err - } - - if debugMux { - if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { - log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) - } else { - p, _ := decode(packet) - log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) - } - } - - switch packet[0] { - case msgChannelOpen: - return m.handleChannelOpen(packet) - case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: - return m.handleGlobalPacket(packet) - } - - // assume a channel packet. - if len(packet) < 5 { - return parseError(packet[0]) - } - id := binary.BigEndian.Uint32(packet[1:]) - ch := m.chanList.getChan(id) - if ch == nil { - return m.handleUnknownChannelPacket(id, packet) - } - - return ch.handlePacket(packet) -} - -func (m *mux) handleGlobalPacket(packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - case *globalRequestMsg: - m.incomingRequests <- &Request{ - Type: msg.Type, - WantReply: msg.WantReply, - Payload: msg.Data, - mux: m, - } - case *globalRequestSuccessMsg, *globalRequestFailureMsg: - m.globalResponses <- msg - default: - panic(fmt.Sprintf("not a global message %#v", msg)) - } - - return nil -} - -// handleChannelOpen schedules a channel to be Accept()ed. 
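Global requests ultimately flow through mux.SendRequest above; from the public API the same path is reachable via ssh.Conn, for example with the conventional OpenSSH keepalive request. A sketch; the keepalive helper name is illustrative.

package muxutil

import "golang.org/x/crypto/ssh"

// keepalive sends the conventional OpenSSH keepalive as a global request.
// Any ssh.Conn works, e.g. an *ssh.Client on the client side.
func keepalive(conn ssh.Conn) error {
	// wantReply=true exercises the request/response path handled by the mux.
	_, _, err := conn.SendRequest("keepalive@openssh.com", true, nil)
	return err
}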
-func (m *mux) handleChannelOpen(packet []byte) error { - var msg channelOpenMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - failMsg := channelOpenFailureMsg{ - PeersID: msg.PeersID, - Reason: ConnectionFailed, - Message: "invalid request", - Language: "en_US.UTF-8", - } - return m.sendMessage(failMsg) - } - - c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) - c.remoteId = msg.PeersID - c.maxRemotePayload = msg.MaxPacketSize - c.remoteWin.add(msg.PeersWindow) - m.incomingChannels <- c - return nil -} - -func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { - ch, err := m.openChannel(chanType, extra) - if err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { - ch := m.newChannel(chanType, channelOutbound, extra) - - ch.maxIncomingPayload = channelMaxPacket - - open := channelOpenMsg{ - ChanType: chanType, - PeersWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - TypeSpecificData: extra, - PeersID: ch.localId, - } - if err := m.sendMessage(open); err != nil { - return nil, err - } - - switch msg := (<-ch.msg).(type) { - case *channelOpenConfirmMsg: - return ch, nil - case *channelOpenFailureMsg: - return nil, &OpenChannelError{msg.Reason, msg.Message} - default: - return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) - } -} - -func (m *mux) handleUnknownChannelPacket(id uint32, packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - // RFC 4254 section 5.4 says unrecognized channel requests should - // receive a failure response. - case *channelRequestMsg: - if msg.WantReply { - return m.sendMessage(channelRequestFailureMsg{ - PeersID: msg.PeersID, - }) - } - return nil - default: - return fmt.Errorf("ssh: invalid channel %d", id) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go deleted file mode 100644 index 9e387029..00000000 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ /dev/null @@ -1,755 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "strings" -) - -// The Permissions type holds fine-grained permissions that are -// specific to a user or a specific authentication method for a user. -// The Permissions value for a successful authentication attempt is -// available in ServerConn, so it can be used to pass information from -// the user-authentication phase to the application layer. -type Permissions struct { - // CriticalOptions indicate restrictions to the default - // permissions, and are typically used in conjunction with - // user certificates. The standard for SSH certificates - // defines "force-command" (only allow the given command to - // execute) and "source-address" (only allow connections from - // the given address). The SSH package currently only enforces - // the "source-address" critical option. It is up to server - // implementations to enforce other critical options, such as - // "force-command", by checking them after the SSH handshake - // is successful. 
In general, SSH servers should reject - // connections that specify critical options that are unknown - // or not supported. - CriticalOptions map[string]string - - // Extensions are extra functionality that the server may - // offer on authenticated connections. Lack of support for an - // extension does not preclude authenticating a user. Common - // extensions are "permit-agent-forwarding", - // "permit-X11-forwarding". The Go SSH library currently does - // not act on any extension, and it is up to server - // implementations to honor them. Extensions can be used to - // pass data from the authentication callbacks to the server - // application layer. - Extensions map[string]string -} - -type GSSAPIWithMICConfig struct { - // AllowLogin, which must be set, is called when gssapi-with-mic - // authentication is selected (RFC 4462 section 3). The srcName is from the - // results of the GSS-API authentication. The format is username@DOMAIN. - // GSSAPI just guarantees to the server who the user is, but not if they can log in, and with what permissions. - // This callback is called after the user identity is established with GSSAPI to decide if the user can log in, and with - // what permissions. If the user is allowed to log in, it should return a nil error. - AllowLogin func(conn ConnMetadata, srcName string) (*Permissions, error) - - // Server must be set. It's the implementation - // of the GSSAPIServer interface. See GSSAPIServer interface for details. - Server GSSAPIServer -} - -// ServerConfig holds server specific configuration data. -type ServerConfig struct { - // Config contains configuration shared between client and server. - Config - - hostKeys []Signer - - // NoClientAuth is true if clients are allowed to connect without - // authenticating. - // To determine NoClientAuth at runtime, set NoClientAuth to true - // and the optional NoClientAuthCallback to a non-nil value. - NoClientAuth bool - - // NoClientAuthCallback, if non-nil, is called when a user - // attempts to authenticate with auth method "none". - // NoClientAuth must also be set to true for this to be used, or - // this func is unused. - NoClientAuthCallback func(ConnMetadata) (*Permissions, error) - - // MaxAuthTries specifies the maximum number of authentication attempts - // permitted per connection. If set to a negative number, the number of - // attempts is unlimited. If set to zero, the number of attempts is limited - // to 6. - MaxAuthTries int - - // PasswordCallback, if non-nil, is called when a user - // attempts to authenticate using a password. - PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) - - // PublicKeyCallback, if non-nil, is called when a client - // offers a public key for authentication. It must return a nil error - // if the given public key can be used to authenticate the - // given user. For example, see CertChecker.Authenticate. A - // call to this function does not guarantee that the key - // offered is in fact used to authenticate. To record any data - // depending on the public key, store it inside a - // Permissions.Extensions entry. - PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // KeyboardInteractiveCallback, if non-nil, is called when - // keyboard-interactive authentication is selected (RFC - // 4256). The client object's Challenge function should be - // used to query the user. The callback may offer multiple - // Challenge rounds.
To avoid information leaks, the client - // should be presented a challenge even if the user is - // unknown. - KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) - - // AuthLogCallback, if non-nil, is called to log all authentication - // attempts. - AuthLogCallback func(conn ConnMetadata, method string, err error) - - // ServerVersion is the version identification string to announce in - // the public handshake. - // If empty, a reasonable default is used. - // Note that RFC 4253 section 4.2 requires that this string start with - // "SSH-2.0-". - ServerVersion string - - // BannerCallback, if present, is called and the return string is sent to - // the client after key exchange completed but before authentication. - BannerCallback func(conn ConnMetadata) string - - // GSSAPIWithMICConfig includes gssapi server and callback, which if both non-nil, is used - // when gssapi-with-mic authentication is selected (RFC 4462 section 3). - GSSAPIWithMICConfig *GSSAPIWithMICConfig -} - -// AddHostKey adds a private key as a host key. If an existing host -// key exists with the same public key format, it is replaced. Each server -// config must have at least one host key. -func (s *ServerConfig) AddHostKey(key Signer) { - for i, k := range s.hostKeys { - if k.PublicKey().Type() == key.PublicKey().Type() { - s.hostKeys[i] = key - return - } - } - - s.hostKeys = append(s.hostKeys, key) -} - -// cachedPubKey contains the results of querying whether a public key is -// acceptable for a user. -type cachedPubKey struct { - user string - pubKeyData []byte - result error - perms *Permissions -} - -const maxCachedPubKeys = 16 - -// pubKeyCache caches tests for public keys. Since SSH clients -// will query whether a public key is acceptable before attempting to -// authenticate with it, we end up with duplicate queries for public -// key validity. The cache only applies to a single ServerConn. -type pubKeyCache struct { - keys []cachedPubKey -} - -// get returns the result for a given user/algo/key tuple. -func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { - for _, k := range c.keys { - if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { - return k, true - } - } - return cachedPubKey{}, false -} - -// add adds the given tuple to the cache. -func (c *pubKeyCache) add(candidate cachedPubKey) { - if len(c.keys) < maxCachedPubKeys { - c.keys = append(c.keys, candidate) - } -} - -// ServerConn is an authenticated SSH connection, as seen from the -// server -type ServerConn struct { - Conn - - // If the succeeding authentication callback returned a - // non-nil Permissions pointer, it is stored here. - Permissions *Permissions -} - -// NewServerConn starts a new SSH server with c as the underlying -// transport. It starts with a handshake and, if the handshake is -// unsuccessful, it closes the connection and returns an error. The -// Request and NewChannel channels must be serviced, or the connection -// will hang. -// -// The returned error may be of type *ServerAuthError for -// authentication errors. 
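A minimal sketch of driving NewServerConn end to end; the host-key path, listen address, and reject-everything password callback are placeholder choices for illustration, not recommendations:

package main

import (
	"fmt"
	"log"
	"net"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	// "host_key.pem" is a placeholder path; any PEM private key works.
	keyBytes, err := os.ReadFile("host_key.pem")
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}

	config := &ssh.ServerConfig{
		PasswordCallback: func(c ssh.ConnMetadata, pw []byte) (*ssh.Permissions, error) {
			// A real server would consult a credential store here.
			return nil, fmt.Errorf("password rejected for %q", c.User())
		},
	}
	config.AddHostKey(signer)

	ln, err := net.Listen("tcp", "127.0.0.1:2022")
	if err != nil {
		log.Fatal(err)
	}
	nc, err := ln.Accept()
	if err != nil {
		log.Fatal(err)
	}

	conn, chans, reqs, err := ssh.NewServerConn(nc, config)
	if err != nil {
		log.Fatal(err) // may be a *ssh.ServerAuthError
	}
	defer conn.Close()

	// Both streams must be serviced or the connection hangs.
	go ssh.DiscardRequests(reqs)
	for newCh := range chans {
		newCh.Reject(ssh.UnknownChannelType, "no channels in this sketch")
	}
}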
-func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.MaxAuthTries == 0 { - fullConf.MaxAuthTries = 6 - } - // Check if the config contains any unsupported key exchanges - for _, kex := range fullConf.KeyExchanges { - if _, ok := serverForbiddenKexAlgos[kex]; ok { - return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex) - } - } - - s := &connection{ - sshConn: sshConn{conn: c}, - } - perms, err := s.serverHandshake(&fullConf) - if err != nil { - c.Close() - return nil, nil, nil, err - } - return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil -} - -// signAndMarshal signs the data with the appropriate algorithm, -// and serializes the result in SSH wire format. algo is the negotiate -// algorithm and may be a certificate type. -func signAndMarshal(k AlgorithmSigner, rand io.Reader, data []byte, algo string) ([]byte, error) { - sig, err := k.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) - if err != nil { - return nil, err - } - - return Marshal(sig), nil -} - -// handshake performs key exchange and user authentication. -func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { - if len(config.hostKeys) == 0 { - return nil, errors.New("ssh: server has no host keys") - } - - if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && - config.KeyboardInteractiveCallback == nil && (config.GSSAPIWithMICConfig == nil || - config.GSSAPIWithMICConfig.AllowLogin == nil || config.GSSAPIWithMICConfig.Server == nil) { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if config.ServerVersion != "" { - s.serverVersion = []byte(config.ServerVersion) - } else { - s.serverVersion = []byte(packageVersion) - } - var err error - s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) - if err != nil { - return nil, err - } - - tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) - s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) - - if err := s.transport.waitSession(); err != nil { - return nil, err - } - - // We just did the key change, so the session ID is established. 
- s.sessionID = s.transport.getSessionID() - - var packet []byte - if packet, err = s.transport.readPacket(); err != nil { - return nil, err - } - - var serviceRequest serviceRequestMsg - if err = Unmarshal(packet, &serviceRequest); err != nil { - return nil, err - } - if serviceRequest.Service != serviceUserAuth { - return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") - } - serviceAccept := serviceAcceptMsg{ - Service: serviceUserAuth, - } - if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { - return nil, err - } - - perms, err := s.serverAuthenticate(config) - if err != nil { - return nil, err - } - s.mux = newMux(s.transport) - return perms, err -} - -func checkSourceAddress(addr net.Addr, sourceAddrs string) error { - if addr == nil { - return errors.New("ssh: no address known for client, but source-address match required") - } - - tcpAddr, ok := addr.(*net.TCPAddr) - if !ok { - return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr) - } - - for _, sourceAddr := range strings.Split(sourceAddrs, ",") { - if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { - if allowedIP.Equal(tcpAddr.IP) { - return nil - } - } else { - _, ipNet, err := net.ParseCIDR(sourceAddr) - if err != nil { - return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) - } - - if ipNet.Contains(tcpAddr.IP) { - return nil - } - } - } - - return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) -} - -func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *connection, - sessionID []byte, userAuthReq userAuthRequestMsg) (authErr error, perms *Permissions, err error) { - gssAPIServer := gssapiConfig.Server - defer gssAPIServer.DeleteSecContext() - var srcName string - for { - var ( - outToken []byte - needContinue bool - ) - outToken, srcName, needContinue, err = gssAPIServer.AcceptSecContext(firstToken) - if err != nil { - return err, nil, nil - } - if len(outToken) != 0 { - if err := s.transport.writePacket(Marshal(&userAuthGSSAPIToken{ - Token: outToken, - })); err != nil { - return nil, nil, err - } - } - if !needContinue { - break - } - packet, err := s.transport.readPacket() - if err != nil { - return nil, nil, err - } - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return nil, nil, err - } - } - packet, err := s.transport.readPacket() - if err != nil { - return nil, nil, err - } - userAuthGSSAPIMICReq := &userAuthGSSAPIMIC{} - if err := Unmarshal(packet, userAuthGSSAPIMICReq); err != nil { - return nil, nil, err - } - mic := buildMIC(string(sessionID), userAuthReq.User, userAuthReq.Service, userAuthReq.Method) - if err := gssAPIServer.VerifyMIC(mic, userAuthGSSAPIMICReq.MIC); err != nil { - return err, nil, nil - } - perms, authErr = gssapiConfig.AllowLogin(s, srcName) - return authErr, perms, nil -} - -// ServerAuthError represents server authentication errors and is -// sometimes returned by NewServerConn. It appends any authentication -// errors that may occur, and is returned if all of the authentication -// methods provided by the user failed to authenticate. -type ServerAuthError struct { - // Errors contains authentication errors returned by the authentication - // callback methods. The first entry is typically ErrNoAuth. 
- Errors []error -} - -func (l ServerAuthError) Error() string { - var errs []string - for _, err := range l.Errors { - errs = append(errs, err.Error()) - } - return "[" + strings.Join(errs, ", ") + "]" -} - -// ErrNoAuth is the error value returned if no -// authentication method has been passed yet. This happens as a normal -// part of the authentication loop, since the client first tries -// 'none' authentication to discover available methods. -// It is returned in ServerAuthError.Errors from NewServerConn. -var ErrNoAuth = errors.New("ssh: no auth passed yet") - -func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { - sessionID := s.transport.getSessionID() - var cache pubKeyCache - var perms *Permissions - - authFailures := 0 - var authErrs []error - var displayedBanner bool - -userAuthLoop: - for { - if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { - discMsg := &disconnectMsg{ - Reason: 2, - Message: "too many authentication failures", - } - - if err := s.transport.writePacket(Marshal(discMsg)); err != nil { - return nil, err - } - - return nil, discMsg - } - - var userAuthReq userAuthRequestMsg - if packet, err := s.transport.readPacket(); err != nil { - if err == io.EOF { - return nil, &ServerAuthError{Errors: authErrs} - } - return nil, err - } else if err = Unmarshal(packet, &userAuthReq); err != nil { - return nil, err - } - - if userAuthReq.Service != serviceSSH { - return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) - } - - s.user = userAuthReq.User - - if !displayedBanner && config.BannerCallback != nil { - displayedBanner = true - msg := config.BannerCallback(s) - if msg != "" { - bannerMsg := &userAuthBannerMsg{ - Message: msg, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { - return nil, err - } - } - } - - perms = nil - authErr := ErrNoAuth - - switch userAuthReq.Method { - case "none": - if config.NoClientAuth { - if config.NoClientAuthCallback != nil { - perms, authErr = config.NoClientAuthCallback(s) - } else { - authErr = nil - } - } - - // allow initial attempt of 'none' without penalty - if authFailures == 0 { - authFailures-- - } - case "password": - if config.PasswordCallback == nil { - authErr = errors.New("ssh: password auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 || payload[0] != 0 { - return nil, parseError(msgUserAuthRequest) - } - payload = payload[1:] - password, payload, ok := parseString(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - perms, authErr = config.PasswordCallback(s, password) - case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { - authErr = errors.New("ssh: keyboard-interactive auth not configured") - break - } - - prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) - case "publickey": - if config.PublicKeyCallback == nil { - authErr = errors.New("ssh: publickey auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 { - return nil, parseError(msgUserAuthRequest) - } - isQuery := payload[0] == 0 - payload = payload[1:] - algoBytes, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - algo := string(algoBytes) - if !contains(supportedPubKeyAuthAlgos, underlyingAlgo(algo)) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) - break - } - - 
pubKeyData, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - - pubKey, err := ParsePublicKey(pubKeyData) - if err != nil { - return nil, err - } - - candidate, ok := cache.get(s.user, pubKeyData) - if !ok { - candidate.user = s.user - candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = checkSourceAddress( - s.RemoteAddr(), - candidate.perms.CriticalOptions[sourceAddressCriticalOption]) - } - cache.add(candidate) - } - - if isQuery { - // The client can query if the given public key - // would be okay. - - if len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - if candidate.result == nil { - okMsg := userAuthPubKeyOkMsg{ - Algo: algo, - PubKey: pubKeyData, - } - if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { - return nil, err - } - continue userAuthLoop - } - authErr = candidate.result - } else { - sig, payload, ok := parseSignature(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - // Ensure the public key algo and signature algo - // are supported. Compare the private key - // algorithm name that corresponds to algo with - // sig.Format. This is usually the same, but - // for certs, the names differ. - if !contains(supportedPubKeyAuthAlgos, sig.Format) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) - break - } - if underlyingAlgo(algo) != sig.Format { - authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo) - break - } - - signedData := buildDataSignedForAuth(sessionID, userAuthReq, algo, pubKeyData) - - if err := pubKey.Verify(signedData, sig); err != nil { - return nil, err - } - - authErr = candidate.result - perms = candidate.perms - } - case "gssapi-with-mic": - if config.GSSAPIWithMICConfig == nil { - authErr = errors.New("ssh: gssapi-with-mic auth not configured") - break - } - gssapiConfig := config.GSSAPIWithMICConfig - userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) - if err != nil { - return nil, parseError(msgUserAuthRequest) - } - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication. - if userAuthRequestGSSAPI.N == 0 { - authErr = fmt.Errorf("ssh: Mechanism negotiation is not supported") - break - } - var i uint32 - present := false - for i = 0; i < userAuthRequestGSSAPI.N; i++ { - if userAuthRequestGSSAPI.OIDS[i].Equal(krb5Mesh) { - present = true - break - } - } - if !present { - authErr = fmt.Errorf("ssh: GSSAPI authentication must use the Kerberos V5 mechanism") - break - } - // Initial server response, see RFC 4462 section 3.3. - if err := s.transport.writePacket(Marshal(&userAuthGSSAPIResponse{ - SupportMech: krb5OID, - })); err != nil { - return nil, err - } - // Exchange token, see RFC 4462 section 3.4. 
- packet, err := s.transport.readPacket() - if err != nil { - return nil, err - } - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return nil, err - } - authErr, perms, err = gssExchangeToken(gssapiConfig, userAuthGSSAPITokenReq.Token, s, sessionID, - userAuthReq) - if err != nil { - return nil, err - } - default: - authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) - } - - authErrs = append(authErrs, authErr) - - if config.AuthLogCallback != nil { - config.AuthLogCallback(s, userAuthReq.Method, authErr) - } - - if authErr == nil { - break userAuthLoop - } - - authFailures++ - if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { - // If we have hit the max attempts, don't bother sending the - // final SSH_MSG_USERAUTH_FAILURE message, since there are - // no more authentication methods which can be attempted, - // and this message may cause the client to re-attempt - // authentication while we send the disconnect message. - // Continue, and trigger the disconnect at the start of - // the loop. - // - // The SSH specification is somewhat confusing about this, - // RFC 4252 Section 5.1 requires each authentication failure - // be responded to with a respective SSH_MSG_USERAUTH_FAILURE - // message, but Section 4 says the server should disconnect - // after some number of attempts, but it isn't explicit which - // message should take precedence (i.e. should there be a failure - // message, then a disconnect message, or if we are going to - // disconnect, should we only send that message.) - // - // Either way, OpenSSH disconnects immediately after the last - // failed authentication attempt, and given they are typically - // considered the golden implementation it seems reasonable - // to match that behavior. - continue - } - - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "password") - } - if config.PublicKeyCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "publickey") - } - if config.KeyboardInteractiveCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") - } - if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil && - config.GSSAPIWithMICConfig.AllowLogin != nil { - failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") - } - - if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { - return nil, err - } - } - - if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { - return nil, err - } - return perms, nil -} - -// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by -// asking the client on the other side of a ServerConn.
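On the configuration side, a sketch of a KeyboardInteractiveCallback that drives the Challenge round-trip implemented below; the question text and the accepted answer are invented for illustration:

package main

import (
	"errors"

	"golang.org/x/crypto/ssh"
)

func kbdConfig() *ssh.ServerConfig {
	return &ssh.ServerConfig{
		KeyboardInteractiveCallback: func(conn ssh.ConnMetadata, challenge ssh.KeyboardInteractiveChallenge) (*ssh.Permissions, error) {
			// Always present a challenge, even for unknown users, so
			// the exchange does not leak which accounts exist.
			answers, err := challenge(conn.User(), "demo login",
				[]string{"one-time code: "}, []bool{false})
			if err != nil {
				return nil, err
			}
			if len(answers) == 1 && answers[0] == "424242" { // hypothetical code
				return nil, nil
			}
			return nil, errors.New("keyboard-interactive rejected")
		},
	}
}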
-type sshClientKeyboardInteractive struct { - *connection -} - -func (c *sshClientKeyboardInteractive) Challenge(name, instruction string, questions []string, echos []bool) (answers []string, err error) { - if len(questions) != len(echos) { - return nil, errors.New("ssh: echos and questions must have equal length") - } - - var prompts []byte - for i := range questions { - prompts = appendString(prompts, questions[i]) - prompts = appendBool(prompts, echos[i]) - } - - if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ - Name: name, - Instruction: instruction, - NumPrompts: uint32(len(questions)), - Prompts: prompts, - })); err != nil { - return nil, err - } - - packet, err := c.transport.readPacket() - if err != nil { - return nil, err - } - if packet[0] != msgUserAuthInfoResponse { - return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) - } - packet = packet[1:] - - n, packet, ok := parseUint32(packet) - if !ok || int(n) != len(questions) { - return nil, parseError(msgUserAuthInfoResponse) - } - - for i := uint32(0); i < n; i++ { - ans, rest, ok := parseString(packet) - if !ok { - return nil, parseError(msgUserAuthInfoResponse) - } - - answers = append(answers, string(ans)) - packet = rest - } - if len(packet) != 0 { - return nil, errors.New("ssh: junk at end of message") - } - - return answers, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go deleted file mode 100644 index acef6225..00000000 --- a/vendor/golang.org/x/crypto/ssh/session.go +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Session implements an interactive session described in -// "RFC 4254, section 6". - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "sync" -) - -type Signal string - -// POSIX signals as listed in RFC 4254 Section 6.10. -const ( - SIGABRT Signal = "ABRT" - SIGALRM Signal = "ALRM" - SIGFPE Signal = "FPE" - SIGHUP Signal = "HUP" - SIGILL Signal = "ILL" - SIGINT Signal = "INT" - SIGKILL Signal = "KILL" - SIGPIPE Signal = "PIPE" - SIGQUIT Signal = "QUIT" - SIGSEGV Signal = "SEGV" - SIGTERM Signal = "TERM" - SIGUSR1 Signal = "USR1" - SIGUSR2 Signal = "USR2" -) - -var signals = map[Signal]int{ - SIGABRT: 6, - SIGALRM: 14, - SIGFPE: 8, - SIGHUP: 1, - SIGILL: 4, - SIGINT: 2, - SIGKILL: 9, - SIGPIPE: 13, - SIGQUIT: 3, - SIGSEGV: 11, - SIGTERM: 15, -} - -type TerminalModes map[uint8]uint32 - -// POSIX terminal mode flags as listed in RFC 4254 Section 8. -const ( - tty_OP_END = 0 - VINTR = 1 - VQUIT = 2 - VERASE = 3 - VKILL = 4 - VEOF = 5 - VEOL = 6 - VEOL2 = 7 - VSTART = 8 - VSTOP = 9 - VSUSP = 10 - VDSUSP = 11 - VREPRINT = 12 - VWERASE = 13 - VLNEXT = 14 - VFLUSH = 15 - VSWTCH = 16 - VSTATUS = 17 - VDISCARD = 18 - IGNPAR = 30 - PARMRK = 31 - INPCK = 32 - ISTRIP = 33 - INLCR = 34 - IGNCR = 35 - ICRNL = 36 - IUCLC = 37 - IXON = 38 - IXANY = 39 - IXOFF = 40 - IMAXBEL = 41 - IUTF8 = 42 // RFC 8160 - ISIG = 50 - ICANON = 51 - XCASE = 52 - ECHO = 53 - ECHOE = 54 - ECHOK = 55 - ECHONL = 56 - NOFLSH = 57 - TOSTOP = 58 - IEXTEN = 59 - ECHOCTL = 60 - ECHOKE = 61 - PENDIN = 62 - OPOST = 70 - OLCUC = 71 - ONLCR = 72 - OCRNL = 73 - ONOCR = 74 - ONLRET = 75 - CS7 = 90 - CS8 = 91 - PARENB = 92 - PARODD = 93 - TTY_OP_ISPEED = 128 - TTY_OP_OSPEED = 129 -) - -// A Session represents a connection to a remote command or shell. 
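A short sketch of the usual Session lifecycle against an established *ssh.Client: one session per remote command, output captured locally ("uname -a" is only an example command):

package main

import (
	"bytes"
	"log"

	"golang.org/x/crypto/ssh"
)

func runOnce(client *ssh.Client) {
	sess, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer sess.Close()

	var out bytes.Buffer
	sess.Stdout = &out
	if err := sess.Run("uname -a"); err != nil {
		log.Fatal(err)
	}
	log.Printf("remote says: %s", out.String())
}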
-type Session struct { - // Stdin specifies the remote process's standard input. - // If Stdin is nil, the remote process reads from an empty - // bytes.Buffer. - Stdin io.Reader - - // Stdout and Stderr specify the remote process's standard - // output and error. - // - // If either is nil, Run connects the corresponding file - // descriptor to an instance of io.Discard. There is a - // fixed amount of buffering that is shared for the two streams. - // If either blocks it may eventually cause the remote - // command to block. - Stdout io.Writer - Stderr io.Writer - - ch Channel // the channel backing this session - started bool // true once Start, Run or Shell is invoked. - copyFuncs []func() error - errors chan error // one send per copyFunc - - // true if pipe method is active - stdinpipe, stdoutpipe, stderrpipe bool - - // stdinPipeWriter is non-nil if StdinPipe has not been called - // and Stdin was specified by the user; it is the write end of - // a pipe connecting Session.Stdin to the stdin channel. - stdinPipeWriter io.WriteCloser - - exitStatus chan error -} - -// SendRequest sends an out-of-band channel request on the SSH channel -// underlying the session. -func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - return s.ch.SendRequest(name, wantReply, payload) -} - -func (s *Session) Close() error { - return s.ch.Close() -} - -// RFC 4254 Section 6.4. -type setenvRequest struct { - Name string - Value string -} - -// Setenv sets an environment variable that will be applied to any -// command executed by Shell or Run. -func (s *Session) Setenv(name, value string) error { - msg := setenvRequest{ - Name: name, - Value: value, - } - ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: setenv failed") - } - return err -} - -// RFC 4254 Section 6.2. -type ptyRequestMsg struct { - Term string - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 - Modelist string -} - -// RequestPty requests the association of a pty with the session on the remote host. -func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { - var tm []byte - for k, v := range termmodes { - kv := struct { - Key byte - Val uint32 - }{k, v} - - tm = append(tm, Marshal(&kv)...) - } - tm = append(tm, tty_OP_END) - req := ptyRequestMsg{ - Term: term, - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - Modelist: string(tm), - } - ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) - if err == nil && !ok { - err = errors.New("ssh: pty-req failed") - } - return err -} - -// RFC 4254 Section 6.5. -type subsystemRequestMsg struct { - Subsystem string -} - -// RequestSubsystem requests the association of a subsystem with the session on the remote host. -// A subsystem is a predefined command that runs in the background when the ssh session is initiated -func (s *Session) RequestSubsystem(subsystem string) error { - msg := subsystemRequestMsg{ - Subsystem: subsystem, - } - ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: subsystem request failed") - } - return err -} - -// RFC 4254 Section 6.7. -type ptyWindowChangeMsg struct { - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 -} - -// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns. 
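A sketch of requesting a pty with explicit modes and later resizing it; the terminal type, dimensions, and speeds are arbitrary illustrative values that mirror the RFC 4254 section 8 constants above:

package main

import "golang.org/x/crypto/ssh"

func withPty(sess *ssh.Session) error {
	modes := ssh.TerminalModes{
		ssh.ECHO:          0,     // disable echoing
		ssh.TTY_OP_ISPEED: 14400, // input speed
		ssh.TTY_OP_OSPEED: 14400, // output speed
	}
	// 24 rows by 80 columns.
	if err := sess.RequestPty("xterm", 24, 80, modes); err != nil {
		return err
	}
	// Later, tell the remote side the window grew to 50 rows by 132 columns.
	return sess.WindowChange(50, 132)
}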
-func (s *Session) WindowChange(h, w int) error { - req := ptyWindowChangeMsg{ - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - } - _, err := s.ch.SendRequest("window-change", false, Marshal(&req)) - return err -} - -// RFC 4254 Section 6.9. -type signalMsg struct { - Signal string -} - -// Signal sends the given signal to the remote process. -// sig is one of the SIG* constants. -func (s *Session) Signal(sig Signal) error { - msg := signalMsg{ - Signal: string(sig), - } - - _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) - return err -} - -// RFC 4254 Section 6.5. -type execMsg struct { - Command string -} - -// Start runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start or Shell. -func (s *Session) Start(cmd string) error { - if s.started { - return errors.New("ssh: session already started") - } - req := execMsg{ - Command: cmd, - } - - ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) - if err == nil && !ok { - err = fmt.Errorf("ssh: command %v failed", cmd) - } - if err != nil { - return err - } - return s.start() -} - -// Run runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start, Shell, Output, -// or CombinedOutput. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Run(cmd string) error { - err := s.Start(cmd) - if err != nil { - return err - } - return s.Wait() -} - -// Output runs cmd on the remote host and returns its standard output. -func (s *Session) Output(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - var b bytes.Buffer - s.Stdout = &b - err := s.Run(cmd) - return b.Bytes(), err -} - -type singleWriter struct { - b bytes.Buffer - mu sync.Mutex -} - -func (w *singleWriter) Write(p []byte) (int, error) { - w.mu.Lock() - defer w.mu.Unlock() - return w.b.Write(p) -} - -// CombinedOutput runs cmd on the remote host and returns its combined -// standard output and standard error. -func (s *Session) CombinedOutput(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - var b singleWriter - s.Stdout = &b - s.Stderr = &b - err := s.Run(cmd) - return b.b.Bytes(), err -} - -// Shell starts a login shell on the remote host. A Session only -// accepts one call to Run, Start, Shell, Output, or CombinedOutput. 
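A sketch combining Start, Signal, and Wait; "sleep 60" is an arbitrary long-running command chosen for illustration:

package main

import "golang.org/x/crypto/ssh"

func interrupt(sess *ssh.Session) error {
	if err := sess.Start("sleep 60"); err != nil {
		return err
	}
	// Ask the remote process to stop.
	if err := sess.Signal(ssh.SIGINT); err != nil {
		return err
	}
	// Typically returns an *ssh.ExitError carrying the signal.
	return sess.Wait()
}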
-func (s *Session) Shell() error { - if s.started { - return errors.New("ssh: session already started") - } - - ok, err := s.ch.SendRequest("shell", true, nil) - if err == nil && !ok { - return errors.New("ssh: could not start shell") - } - if err != nil { - return err - } - return s.start() -} - -func (s *Session) start() error { - s.started = true - - type F func(*Session) - for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { - setupFd(s) - } - - s.errors = make(chan error, len(s.copyFuncs)) - for _, fn := range s.copyFuncs { - go func(fn func() error) { - s.errors <- fn() - }(fn) - } - return nil -} - -// Wait waits for the remote command to exit. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Wait() error { - if !s.started { - return errors.New("ssh: session not started") - } - waitErr := <-s.exitStatus - - if s.stdinPipeWriter != nil { - s.stdinPipeWriter.Close() - } - var copyError error - for range s.copyFuncs { - if err := <-s.errors; err != nil && copyError == nil { - copyError = err - } - } - if waitErr != nil { - return waitErr - } - return copyError -} - -func (s *Session) wait(reqs <-chan *Request) error { - wm := Waitmsg{status: -1} - // Wait for msg channel to be closed before returning. - for msg := range reqs { - switch msg.Type { - case "exit-status": - wm.status = int(binary.BigEndian.Uint32(msg.Payload)) - case "exit-signal": - var sigval struct { - Signal string - CoreDumped bool - Error string - Lang string - } - if err := Unmarshal(msg.Payload, &sigval); err != nil { - return err - } - - // Must sanitize strings? - wm.signal = sigval.Signal - wm.msg = sigval.Error - wm.lang = sigval.Lang - default: - // This handles keepalives and matches - // OpenSSH's behaviour. - if msg.WantReply { - msg.Reply(false, nil) - } - } - } - if wm.status == 0 { - return nil - } - if wm.status == -1 { - // exit-status was never sent from server - if wm.signal == "" { - // signal was not sent either. RFC 4254 - // section 6.10 recommends against this - // behavior, but it is allowed, so we let - // clients handle it. - return &ExitMissingError{} - } - wm.status = 128 - if _, ok := signals[Signal(wm.signal)]; ok { - wm.status += signals[Signal(wm.signal)] - } - } - - return &ExitError{wm} -} - -// ExitMissingError is returned if a session is torn down cleanly, but -// the server sends no confirmation of the exit status. 
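A sketch of telling the documented error types apart after Run; the remote command is illustrative:

package main

import (
	"errors"
	"log"

	"golang.org/x/crypto/ssh"
)

func classify(sess *ssh.Session) {
	err := sess.Run("exit 3") // example command with a nonzero status
	var exitErr *ssh.ExitError
	var missing *ssh.ExitMissingError
	switch {
	case err == nil:
		log.Print("exited 0")
	case errors.As(err, &exitErr):
		log.Printf("exit status %d, signal %q", exitErr.ExitStatus(), exitErr.Signal())
	case errors.As(err, &missing):
		log.Print("remote sent no exit status")
	default:
		log.Printf("I/O or protocol error: %v", err)
	}
}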
-type ExitMissingError struct{} - -func (e *ExitMissingError) Error() string { - return "wait: remote command exited without exit status or exit signal" -} - -func (s *Session) stdin() { - if s.stdinpipe { - return - } - var stdin io.Reader - if s.Stdin == nil { - stdin = new(bytes.Buffer) - } else { - r, w := io.Pipe() - go func() { - _, err := io.Copy(w, s.Stdin) - w.CloseWithError(err) - }() - stdin, s.stdinPipeWriter = r, w - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.ch, stdin) - if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { - err = err1 - } - return err - }) -} - -func (s *Session) stdout() { - if s.stdoutpipe { - return - } - if s.Stdout == nil { - s.Stdout = io.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stdout, s.ch) - return err - }) -} - -func (s *Session) stderr() { - if s.stderrpipe { - return - } - if s.Stderr == nil { - s.Stderr = io.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stderr, s.ch.Stderr()) - return err - }) -} - -// sessionStdin reroutes Close to CloseWrite. -type sessionStdin struct { - io.Writer - ch Channel -} - -func (s *sessionStdin) Close() error { - return s.ch.CloseWrite() -} - -// StdinPipe returns a pipe that will be connected to the -// remote command's standard input when the command starts. -func (s *Session) StdinPipe() (io.WriteCloser, error) { - if s.Stdin != nil { - return nil, errors.New("ssh: Stdin already set") - } - if s.started { - return nil, errors.New("ssh: StdinPipe after process started") - } - s.stdinpipe = true - return &sessionStdin{s.ch, s.ch}, nil -} - -// StdoutPipe returns a pipe that will be connected to the -// remote command's standard output when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StdoutPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StdoutPipe() (io.Reader, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.started { - return nil, errors.New("ssh: StdoutPipe after process started") - } - s.stdoutpipe = true - return s.ch, nil -} - -// StderrPipe returns a pipe that will be connected to the -// remote command's standard error when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StderrPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StderrPipe() (io.Reader, error) { - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - if s.started { - return nil, errors.New("ssh: StderrPipe after process started") - } - s.stderrpipe = true - return s.ch.Stderr(), nil -} - -// newSession returns a new interactive session on the remote host. -func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { - s := &Session{ - ch: ch, - } - s.exitStatus = make(chan error, 1) - go func() { - s.exitStatus <- s.wait(reqs) - }() - - return s, nil -} - -// An ExitError reports unsuccessful completion of a remote command. -type ExitError struct { - Waitmsg -} - -func (e *ExitError) Error() string { - return e.Waitmsg.String() -} - -// Waitmsg stores the information about an exited remote command -// as reported by Wait. 
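A sketch of streaming data into a remote command through StdinPipe; closing the pipe is what delivers EOF to the remote side ("wc -c" is an example command):

package main

import (
	"bytes"
	"io"
	"log"

	"golang.org/x/crypto/ssh"
)

func stream(sess *ssh.Session) {
	in, err := sess.StdinPipe()
	if err != nil {
		log.Fatal(err)
	}
	var out bytes.Buffer
	sess.Stdout = &out

	if err := sess.Start("wc -c"); err != nil {
		log.Fatal(err)
	}
	io.WriteString(in, "hello, world\n")
	in.Close() // remote command sees EOF and exits
	if err := sess.Wait(); err != nil {
		log.Fatal(err)
	}
	log.Printf("remote counted: %s", out.String())
}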
-type Waitmsg struct { - status int - signal string - msg string - lang string -} - -// ExitStatus returns the exit status of the remote command. -func (w Waitmsg) ExitStatus() int { - return w.status -} - -// Signal returns the exit signal of the remote command if -// it was terminated violently. -func (w Waitmsg) Signal() string { - return w.signal -} - -// Msg returns the exit message given by the remote command -func (w Waitmsg) Msg() string { - return w.msg -} - -// Lang returns the language tag. See RFC 3066 -func (w Waitmsg) Lang() string { - return w.lang -} - -func (w Waitmsg) String() string { - str := fmt.Sprintf("Process exited with status %v", w.status) - if w.signal != "" { - str += fmt.Sprintf(" from signal %v", w.signal) - } - if w.msg != "" { - str += fmt.Sprintf(". Reason was: %v", w.msg) - } - return str -} diff --git a/vendor/golang.org/x/crypto/ssh/ssh_gss.go b/vendor/golang.org/x/crypto/ssh/ssh_gss.go deleted file mode 100644 index 24bd7c8e..00000000 --- a/vendor/golang.org/x/crypto/ssh/ssh_gss.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/asn1" - "errors" -) - -var krb5OID []byte - -func init() { - krb5OID, _ = asn1.Marshal(krb5Mesh) -} - -// GSSAPIClient provides the API to plug-in GSSAPI authentication for client logins. -type GSSAPIClient interface { - // InitSecContext initiates the establishment of a security context for GSS-API between the - // ssh client and ssh server. Initially the token parameter should be specified as nil. - // The routine may return a outputToken which should be transferred to - // the ssh server, where the ssh server will present it to - // AcceptSecContext. If no token need be sent, InitSecContext will indicate this by setting - // needContinue to false. To complete the context - // establishment, one or more reply tokens may be required from the ssh - // server;if so, InitSecContext will return a needContinue which is true. - // In this case, InitSecContext should be called again when the - // reply token is received from the ssh server, passing the reply - // token to InitSecContext via the token parameters. - // See RFC 2743 section 2.2.1 and RFC 4462 section 3.4. - InitSecContext(target string, token []byte, isGSSDelegCreds bool) (outputToken []byte, needContinue bool, err error) - // GetMIC generates a cryptographic MIC for the SSH2 message, and places - // the MIC in a token for transfer to the ssh server. - // The contents of the MIC field are obtained by calling GSS_GetMIC() - // over the following, using the GSS-API context that was just - // established: - // string session identifier - // byte SSH_MSG_USERAUTH_REQUEST - // string user name - // string service - // string "gssapi-with-mic" - // See RFC 2743 section 2.3.1 and RFC 4462 3.5. - GetMIC(micFiled []byte) ([]byte, error) - // Whenever possible, it should be possible for - // DeleteSecContext() calls to be successfully processed even - // if other calls cannot succeed, thereby enabling context-related - // resources to be released. - // In addition to deleting established security contexts, - // gss_delete_sec_context must also be able to delete "half-built" - // security contexts resulting from an incomplete sequence of - // InitSecContext()/AcceptSecContext() calls. - // See RFC 2743 section 2.2.3. 
- DeleteSecContext() error -} - -// GSSAPIServer provides the API to plug in GSSAPI authentication for server logins. -type GSSAPIServer interface { - // AcceptSecContext allows a remotely initiated security context between the application - // and a remote peer to be established by the ssh client. The routine may return a - // outputToken which should be transferred to the ssh client, - // where the ssh client will present it to InitSecContext. - // If no token need be sent, AcceptSecContext will indicate this - // by setting the needContinue to false. To - // complete the context establishment, one or more reply tokens may be - // required from the ssh client. if so, AcceptSecContext - // will return a needContinue which is true, in which case it - // should be called again when the reply token is received from the ssh - // client, passing the token to AcceptSecContext via the - // token parameters. - // The srcName return value is the authenticated username. - // See RFC 2743 section 2.2.2 and RFC 4462 section 3.4. - AcceptSecContext(token []byte) (outputToken []byte, srcName string, needContinue bool, err error) - // VerifyMIC verifies that a cryptographic MIC, contained in the token parameter, - // fits the supplied message is received from the ssh client. - // See RFC 2743 section 2.3.2. - VerifyMIC(micField []byte, micToken []byte) error - // Whenever possible, it should be possible for - // DeleteSecContext() calls to be successfully processed even - // if other calls cannot succeed, thereby enabling context-related - // resources to be released. - // In addition to deleting established security contexts, - // gss_delete_sec_context must also be able to delete "half-built" - // security contexts resulting from an incomplete sequence of - // InitSecContext()/AcceptSecContext() calls. - // See RFC 2743 section 2.2.3. - DeleteSecContext() error -} - -var ( - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication, - // so we also support the krb5 mechanism only. - // See RFC 1964 section 1. - krb5Mesh = asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2} -) - -// The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST -// See RFC 4462 section 3.2. -type userAuthRequestGSSAPI struct { - N uint32 - OIDS []asn1.ObjectIdentifier -} - -func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) { - n, rest, ok := parseUint32(payload) - if !ok { - return nil, errors.New("parse uint32 failed") - } - s := &userAuthRequestGSSAPI{ - N: n, - OIDS: make([]asn1.ObjectIdentifier, n), - } - for i := 0; i < int(n); i++ { - var ( - desiredMech []byte - err error - ) - desiredMech, rest, ok = parseString(rest) - if !ok { - return nil, errors.New("parse string failed") - } - if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil { - return nil, err - } - - } - return s, nil -} - -// See RFC 4462 section 3.6. 
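A compile-time sketch of plugging a GSSAPIServer into a server config; the stub returns errors rather than doing real GSS-API work (a real implementation would delegate to a Kerberos library), and the principal name checked in AllowLogin is hypothetical:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/crypto/ssh"
)

type stubGSSAPIServer struct{}

func (stubGSSAPIServer) AcceptSecContext(token []byte) ([]byte, string, bool, error) {
	return nil, "", false, errors.New("gssapi: not implemented in this sketch")
}

func (stubGSSAPIServer) VerifyMIC(micField, micToken []byte) error {
	return errors.New("gssapi: not implemented in this sketch")
}

func (stubGSSAPIServer) DeleteSecContext() error { return nil }

var _ ssh.GSSAPIServer = stubGSSAPIServer{}

func gssConfig() *ssh.ServerConfig {
	return &ssh.ServerConfig{
		GSSAPIWithMICConfig: &ssh.GSSAPIWithMICConfig{
			Server: stubGSSAPIServer{},
			AllowLogin: func(conn ssh.ConnMetadata, srcName string) (*ssh.Permissions, error) {
				// srcName has the form user@DOMAIN; authorize here.
				if srcName == "alice@EXAMPLE.ORG" { // hypothetical principal
					return nil, nil
				}
				return nil, fmt.Errorf("gssapi: login denied for %q", srcName)
			},
		},
	}
}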
-func buildMIC(sessionID string, username string, service string, authMethod string) []byte { - out := make([]byte, 0, 0) - out = appendString(out, sessionID) - out = append(out, msgUserAuthRequest) - out = appendString(out, username) - out = appendString(out, service) - out = appendString(out, authMethod) - return out -} diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go deleted file mode 100644 index b171b330..00000000 --- a/vendor/golang.org/x/crypto/ssh/streamlocal.go +++ /dev/null @@ -1,116 +0,0 @@ -package ssh - -import ( - "errors" - "io" - "net" -) - -// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "direct-streamlocal@openssh.com" string. -// -// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 -type streamLocalChannelOpenDirectMsg struct { - socketPath string - reserved0 string - reserved1 uint32 -} - -// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "forwarded-streamlocal@openssh.com" string. -type forwardedStreamLocalPayload struct { - SocketPath string - Reserved0 string -} - -// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message -// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. -type streamLocalChannelForwardMsg struct { - socketPath string -} - -// ListenUnix is similar to ListenTCP but uses a Unix domain socket. -func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - m := streamLocalChannelForwardMsg{ - socketPath, - } - // send message - ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") - } - ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) - - return &unixListener{socketPath, c, ch}, nil -} - -func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { - msg := streamLocalChannelOpenDirectMsg{ - socketPath: socketPath, - } - ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type unixListener struct { - socketPath string - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. -func (l *unixListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - }, nil -} - -// Close closes the listener. -func (l *unixListener) Close() error { - // this also closes the listener. - l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) - m := streamLocalChannelForwardMsg{ - l.socketPath, - } - ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") - } - return err -} - -// Addr returns the listener's network address. 
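A sketch of both streamlocal directions from an established *ssh.Client; the socket paths are placeholders on the remote host:

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func unixForward(client *ssh.Client) {
	// Ask the peer to listen on a remote Unix socket and forward
	// connections back to us ("streamlocal-forward@openssh.com").
	ln, err := client.ListenUnix("/var/run/demo-in.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	// Dial a Unix socket on the remote host
	// ("direct-streamlocal@openssh.com").
	conn, err := client.Dial("unix", "/var/run/demo-out.sock")
	if err != nil {
		log.Fatal(err)
	}
	conn.Close()
}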
-func (l *unixListener) Addr() net.Addr { - return &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - } -} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go deleted file mode 100644 index 80d35f5e..00000000 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "errors" - "fmt" - "io" - "math/rand" - "net" - "strconv" - "strings" - "sync" - "time" -) - -// Listen requests the remote peer open a listening socket on -// addr. Incoming connections will be available by calling Accept on -// the returned net.Listener. The listener must be serviced, or the -// SSH connection may hang. -// N must be "tcp", "tcp4", "tcp6", or "unix". -func (c *Client) Listen(n, addr string) (net.Listener, error) { - switch n { - case "tcp", "tcp4", "tcp6": - laddr, err := net.ResolveTCPAddr(n, addr) - if err != nil { - return nil, err - } - return c.ListenTCP(laddr) - case "unix": - return c.ListenUnix(addr) - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// Automatic port allocation is broken with OpenSSH before 6.0. See -// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In -// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, -// rather than the actual port number. This means you can never open -// two different listeners with auto allocated ports. We work around -// this by trying explicit ports until we succeed. - -const openSSHPrefix = "OpenSSH_" - -var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) - -// isBrokenOpenSSHVersion returns true if the given version string -// specifies a version of OpenSSH that is known to have a bug in port -// forwarding. -func isBrokenOpenSSHVersion(versionStr string) bool { - i := strings.Index(versionStr, openSSHPrefix) - if i < 0 { - return false - } - i += len(openSSHPrefix) - j := i - for ; j < len(versionStr); j++ { - if versionStr[j] < '0' || versionStr[j] > '9' { - break - } - } - version, _ := strconv.Atoi(versionStr[i:j]) - return version < 6 -} - -// autoPortListenWorkaround simulates automatic port allocation by -// trying random ports repeatedly. -func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { - var sshListener net.Listener - var err error - const tries = 10 - for i := 0; i < tries; i++ { - addr := *laddr - addr.Port = 1024 + portRandomizer.Intn(60000) - sshListener, err = c.ListenTCP(&addr) - if err == nil { - laddr.Port = addr.Port - return sshListener, err - } - } - return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) -} - -// RFC 4254 7.1 -type channelForwardMsg struct { - addr string - rport uint32 -} - -// handleForwards starts goroutines handling forwarded connections. -// It's called on first use by (*Client).ListenTCP to not launch -// goroutines until needed. -func (c *Client) handleForwards() { - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip")) - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com")) -} - -// ListenTCP requests the remote peer open a listening socket -// on laddr. Incoming connections will be available by calling -// Accept on the returned net.Listener. 
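A sketch of serving HTTP over a remote forwarded listener from an established *ssh.Client; the address and handler are illustrative (with port 0 the peer picks the port, subject to the old-OpenSSH workaround above):

package main

import (
	"log"
	"net/http"

	"golang.org/x/crypto/ssh"
)

func serveRemote(client *ssh.Client) {
	ln, err := client.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	// Connections arriving on the remote port are handled locally.
	err = http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello from the SSH client\n"))
	}))
	log.Print(err)
}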
-func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { - return c.autoPortListenWorkaround(laddr) - } - - m := channelForwardMsg{ - laddr.IP.String(), - uint32(laddr.Port), - } - // send message - ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: tcpip-forward request denied by peer") - } - - // If the original port was 0, then the remote side will - // supply a real port number in the response. - if laddr.Port == 0 { - var p struct { - Port uint32 - } - if err := Unmarshal(resp, &p); err != nil { - return nil, err - } - laddr.Port = int(p.Port) - } - - // Register this forward, using the port number we obtained. - ch := c.forwards.add(laddr) - - return &tcpListener{laddr, c, ch}, nil -} - -// forwardList stores a mapping between remote -// forward requests and the tcpListeners. -type forwardList struct { - sync.Mutex - entries []forwardEntry -} - -// forwardEntry represents an established mapping of a laddr on a -// remote ssh server to a channel connected to a tcpListener. -type forwardEntry struct { - laddr net.Addr - c chan forward -} - -// forward represents an incoming forwarded tcpip connection. The -// arguments to add/remove/lookup should be address as specified in -// the original forward-request. -type forward struct { - newCh NewChannel // the ssh client channel underlying this forward - raddr net.Addr // the raddr of the incoming connection -} - -func (l *forwardList) add(addr net.Addr) chan forward { - l.Lock() - defer l.Unlock() - f := forwardEntry{ - laddr: addr, - c: make(chan forward, 1), - } - l.entries = append(l.entries, f) - return f.c -} - -// See RFC 4254, section 7.2 -type forwardedTCPPayload struct { - Addr string - Port uint32 - OriginAddr string - OriginPort uint32 -} - -// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. -func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { - if port == 0 || port > 65535 { - return nil, fmt.Errorf("ssh: port number out of range: %d", port) - } - ip := net.ParseIP(string(addr)) - if ip == nil { - return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) - } - return &net.TCPAddr{IP: ip, Port: int(port)}, nil -} - -func (l *forwardList) handleChannels(in <-chan NewChannel) { - for ch := range in { - var ( - laddr net.Addr - raddr net.Addr - err error - ) - switch channelType := ch.ChannelType(); channelType { - case "forwarded-tcpip": - var payload forwardedTCPPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) - continue - } - - // RFC 4254 section 7.2 specifies that incoming - // addresses should list the address, in string - // format. It is implied that this should be an IP - // address, as it would be impossible to connect to it - // otherwise. 
- laddr, err = parseTCPAddr(payload.Addr, payload.Port) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - - case "forwarded-streamlocal@openssh.com": - var payload forwardedStreamLocalPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) - continue - } - laddr = &net.UnixAddr{ - Name: payload.SocketPath, - Net: "unix", - } - raddr = &net.UnixAddr{ - Name: "@", - Net: "unix", - } - default: - panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) - } - if ok := l.forward(laddr, raddr, ch); !ok { - // Section 7.2, implementations MUST reject spurious incoming - // connections. - ch.Reject(Prohibited, "no forward for address") - continue - } - - } -} - -// remove removes the forward entry, and the channel feeding its -// listener. -func (l *forwardList) remove(addr net.Addr) { - l.Lock() - defer l.Unlock() - for i, f := range l.entries { - if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { - l.entries = append(l.entries[:i], l.entries[i+1:]...) - close(f.c) - return - } - } -} - -// closeAll closes and clears all forwards. -func (l *forwardList) closeAll() { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - close(f.c) - } - l.entries = nil -} - -func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { - f.c <- forward{newCh: ch, raddr: raddr} - return true - } - } - return false -} - -type tcpListener struct { - laddr *net.TCPAddr - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. -func (l *tcpListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: l.laddr, - raddr: s.raddr, - }, nil -} - -// Close closes the listener. -func (l *tcpListener) Close() error { - m := channelForwardMsg{ - l.laddr.IP.String(), - uint32(l.laddr.Port), - } - - // this also closes the listener. - l.conn.forwards.remove(l.laddr) - ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-tcpip-forward failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *tcpListener) Addr() net.Addr { - return l.laddr -} - -// Dial initiates a connection to the addr from the remote host. -// The resulting connection has a zero LocalAddr() and RemoteAddr(). -func (c *Client) Dial(n, addr string) (net.Conn, error) { - var ch Channel - switch n { - case "tcp", "tcp4", "tcp6": - // Parse the address into host and numeric port. - host, portString, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.ParseUint(portString, 10, 16) - if err != nil { - return nil, err - } - ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) - if err != nil { - return nil, err - } - // Use a zero address for local and remote address. 
- zeroAddr := &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - return &chanConn{ - Channel: ch, - laddr: zeroAddr, - raddr: zeroAddr, - }, nil - case "unix": - var err error - ch, err = c.dialStreamLocal(addr) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: addr, - Net: "unix", - }, - }, nil - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// DialTCP connects to the remote address raddr on the network net, -// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used -// as the local address for the connection. -func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { - if laddr == nil { - laddr = &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - } - ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: laddr, - raddr: raddr, - }, nil -} - -// RFC 4254 7.2 -type channelOpenDirectMsg struct { - raddr string - rport uint32 - laddr string - lport uint32 -} - -func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { - msg := channelOpenDirectMsg{ - raddr: raddr, - rport: uint32(rport), - laddr: laddr, - lport: uint32(lport), - } - ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type tcpChan struct { - Channel // the backing channel -} - -// chanConn fulfills the net.Conn interface without -// the tcpChan having to hold laddr or raddr directly. -type chanConn struct { - Channel - laddr, raddr net.Addr -} - -// LocalAddr returns the local network address. -func (t *chanConn) LocalAddr() net.Addr { - return t.laddr -} - -// RemoteAddr returns the remote network address. -func (t *chanConn) RemoteAddr() net.Addr { - return t.raddr -} - -// SetDeadline sets the read and write deadlines associated -// with the connection. -func (t *chanConn) SetDeadline(deadline time.Time) error { - if err := t.SetReadDeadline(deadline); err != nil { - return err - } - return t.SetWriteDeadline(deadline) -} - -// SetReadDeadline sets the read deadline. -// A zero value for t means Read will not time out. -// After the deadline, the error from Read will implement net.Error -// with Timeout() == true. -func (t *chanConn) SetReadDeadline(deadline time.Time) error { - // for compatibility with previous version, - // the error message contains "tcpChan" - return errors.New("ssh: tcpChan: deadline not supported") -} - -// SetWriteDeadline exists to satisfy the net.Conn interface -// but is not implemented by this type. It always returns an error. -func (t *chanConn) SetWriteDeadline(deadline time.Time) error { - return errors.New("ssh: tcpChan: deadline not supported") -} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go deleted file mode 100644 index acf5a21b..00000000 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bufio" - "bytes" - "errors" - "io" - "log" -) - -// debugTransport if set, will print packet types as they go over the -// wire. No message decoding is done, to minimize the impact on timing. 
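// Flipping this constant to true and rebuilding produces one log line
// per packet, e.g. "write client 20" for an outgoing key-exchange init
// (message type 20).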
-const debugTransport = false - -const ( - gcmCipherID = "aes128-gcm@openssh.com" - aes128cbcID = "aes128-cbc" - tripledescbcID = "3des-cbc" -) - -// packetConn represents a transport that implements packet based -// operations. -type packetConn interface { - // Encrypt and send a packet of data to the remote peer. - writePacket(packet []byte) error - - // Read a packet from the connection. The read is blocking, - // i.e. if error is nil, then the returned byte slice is - // always non-empty. - readPacket() ([]byte, error) - - // Close closes the write-side of the connection. - Close() error -} - -// transport is the keyingTransport that implements the SSH packet -// protocol. -type transport struct { - reader connectionState - writer connectionState - - bufReader *bufio.Reader - bufWriter *bufio.Writer - rand io.Reader - isClient bool - io.Closer -} - -// packetCipher represents a combination of SSH encryption/MAC -// protocol. A single instance should be used for one direction only. -type packetCipher interface { - // writeCipherPacket encrypts the packet and writes it to w. The - // contents of the packet are lost. - writeCipherPacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error - - // readCipherPacket reads and decrypts a packet of data. The - // returned packet may be overwritten by future calls of - // readPacket. - readCipherPacket(seqnum uint32, r io.Reader) ([]byte, error) -} - -// connectionState represents one side (read or write) of the -// connection. This is necessary because each direction has its own -// keys, and can even have its own algorithms -type connectionState struct { - packetCipher - seqNum uint32 - dir direction - pendingKeyChange chan packetCipher -} - -// prepareKeyChange sets up key material for a keychange. The key changes in -// both directions are triggered by reading and writing a msgNewKey packet -// respectively. -func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { - ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) - if err != nil { - return err - } - t.reader.pendingKeyChange <- ciph - - ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) - if err != nil { - return err - } - t.writer.pendingKeyChange <- ciph - - return nil -} - -func (t *transport) printPacket(p []byte, write bool) { - if len(p) == 0 { - return - } - who := "server" - if t.isClient { - who = "client" - } - what := "read" - if write { - what = "write" - } - - log.Println(what, who, p[0]) -} - -// Read and decrypt next packet. -func (t *transport) readPacket() (p []byte, err error) { - for { - p, err = t.reader.readPacket(t.bufReader) - if err != nil { - break - } - if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { - break - } - } - if debugTransport { - t.printPacket(p, false) - } - - return p, err -} - -func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { - packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) - s.seqNum++ - if err == nil && len(packet) == 0 { - err = errors.New("ssh: zero length packet") - } - - if len(packet) > 0 { - switch packet[0] { - case msgNewKeys: - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - return nil, errors.New("ssh: got bogus newkeys message") - } - - case msgDisconnect: - // Transform a disconnect message into an - // error. Since this is lowest level at which - // we interpret message types, doing it here - // ensures that we don't have to handle it - // elsewhere.
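// disconnectMsg itself implements the error interface, so returning
// the unmarshalled message surfaces the server's reason code and text
// directly to callers.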
- var msg disconnectMsg - if err := Unmarshal(packet, &msg); err != nil { - return nil, err - } - return nil, &msg - } - } - - // The packet may point to an internal buffer, so copy the - // packet out here. - fresh := make([]byte, len(packet)) - copy(fresh, packet) - - return fresh, err -} - -func (t *transport) writePacket(packet []byte) error { - if debugTransport { - t.printPacket(packet, true) - } - return t.writer.writePacket(t.bufWriter, t.rand, packet) -} - -func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { - changeKeys := len(packet) > 0 && packet[0] == msgNewKeys - - err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet) - if err != nil { - return err - } - if err = w.Flush(); err != nil { - return err - } - s.seqNum++ - if changeKeys { - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - panic("ssh: no key material for msgNewKeys") - } - } - return err -} - -func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { - t := &transport{ - bufReader: bufio.NewReader(rwc), - bufWriter: bufio.NewWriter(rwc), - rand: rand, - reader: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - writer: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - Closer: rwc, - } - t.isClient = isClient - - if isClient { - t.reader.dir = serverKeys - t.writer.dir = clientKeys - } else { - t.reader.dir = clientKeys - t.writer.dir = serverKeys - } - - return t -} - -type direction struct { - ivTag []byte - keyTag []byte - macKeyTag []byte -} - -var ( - serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} - clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} -) - -// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as -// described in RFC 4253, section 6.4. direction should either be serverKeys -// (to setup server->client keys) or clientKeys (for client->server keys). -func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { - cipherMode := cipherModes[algs.Cipher] - - iv := make([]byte, cipherMode.ivSize) - key := make([]byte, cipherMode.keySize) - - generateKeyMaterial(iv, d.ivTag, kex) - generateKeyMaterial(key, d.keyTag, kex) - - var macKey []byte - if !aeadCiphers[algs.Cipher] { - macMode := macModes[algs.MAC] - macKey = make([]byte, macMode.keySize) - generateKeyMaterial(macKey, d.macKeyTag, kex) - } - - return cipherModes[algs.Cipher].create(key, iv, macKey, algs) -} - -// generateKeyMaterial fills out with key material generated from tag, K, H -// and sessionId, as specified in RFC 4253, section 7.2. -func generateKeyMaterial(out, tag []byte, r *kexResult) { - var digestsSoFar []byte - - h := r.Hash.New() - for len(out) > 0 { - h.Reset() - h.Write(r.K) - h.Write(r.H) - - if len(digestsSoFar) == 0 { - h.Write(tag) - h.Write(r.SessionID) - } else { - h.Write(digestsSoFar) - } - - digest := h.Sum(nil) - n := copy(out, digest) - out = out[n:] - if len(out) > 0 { - digestsSoFar = append(digestsSoFar, digest...) - } - } -} - -const packageVersion = "SSH-2.0-Go" - -// Sends and receives a version line. The versionLine string should -// be US ASCII, start with "SSH-2.0-", and should not include a -// newline. exchangeVersions returns the other side's version line. 
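// This package's own line is packageVersion ("SSH-2.0-Go"); a caller
// supplying a custom identification would pass something of the form
// "SSH-2.0-Go-myapp" (suffix illustrative).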
-func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { - // Contrary to the RFC, we do not ignore lines that don't - // start with "SSH-2.0-" to make the library usable with - // nonconforming servers. - for _, c := range versionLine { - // The spec disallows non US-ASCII chars, and - // specifically forbids null chars. - if c < 32 { - return nil, errors.New("ssh: junk character in version line") - } - } - if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { - return - } - - them, err = readVersion(rw) - return them, err -} - -// maxVersionStringBytes is the maximum number of bytes that we'll -// accept as a version string. RFC 4253 section 4.2 limits this at 255 -// chars -const maxVersionStringBytes = 255 - -// Read version string as specified by RFC 4253, section 4.2. -func readVersion(r io.Reader) ([]byte, error) { - versionString := make([]byte, 0, 64) - var ok bool - var buf [1]byte - - for length := 0; length < maxVersionStringBytes; length++ { - _, err := io.ReadFull(r, buf[:]) - if err != nil { - return nil, err - } - // The RFC says that the version should be terminated with \r\n - // but several SSH servers actually only send a \n. - if buf[0] == '\n' { - if !bytes.HasPrefix(versionString, []byte("SSH-")) { - // RFC 4253 says we need to ignore all version string lines - // except the one containing the SSH version (provided that - // all the lines do not exceed 255 bytes in total). - versionString = versionString[:0] - continue - } - ok = true - break - } - - // non ASCII chars are disallowed, but we are lenient, - // since Go doesn't use null-terminated strings. - - // The RFC allows a comment after a space, however, - // all of it (version and comments) goes into the - // session hash. - versionString = append(versionString, buf[0]) - } - - if !ok { - return nil, errors.New("ssh: overflow reading version string") - } - - // There might be a '\r' on the end which we should remove. - if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { - versionString = versionString[:len(versionString)-1] - } - return versionString, nil -} diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/golang.org/x/mod/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/mod/PATENTS b/vendor/golang.org/x/mod/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/mod/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/mod/sumdb/dirhash/hash.go b/vendor/golang.org/x/mod/sumdb/dirhash/hash.go deleted file mode 100644 index 51ec4db8..00000000 --- a/vendor/golang.org/x/mod/sumdb/dirhash/hash.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package dirhash defines hashes over directory trees. -// These hashes are recorded in go.sum files and in the Go checksum database, -// to allow verifying that a newly-downloaded module has the expected content. -package dirhash - -import ( - "archive/zip" - "crypto/sha256" - "encoding/base64" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" -) - -// DefaultHash is the default hash function used in new go.sum entries. -var DefaultHash Hash = Hash1 - -// A Hash is a directory hash function. -// It accepts a list of files along with a function that opens the content of each file. -// It opens, reads, hashes, and closes each file and returns the overall directory hash. -type Hash func(files []string, open func(string) (io.ReadCloser, error)) (string, error) - -// Hash1 is the "h1:" directory hash function, using SHA-256. -// -// Hash1 is "h1:" followed by the base64-encoded SHA-256 hash of a summary -// prepared as if by the Unix command: -// -// sha256sum $(find . 
-type f | sort) | sha256sum -// -// More precisely, the hashed summary contains a single line for each file in the list, -// ordered by sort.Strings applied to the file names, where each line consists of -// the hexadecimal SHA-256 hash of the file content, -// two spaces (U+0020), the file name, and a newline (U+000A). -// -// File names with newlines (U+000A) are disallowed. -func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, error) { - h := sha256.New() - files = append([]string(nil), files...) - sort.Strings(files) - for _, file := range files { - if strings.Contains(file, "\n") { - return "", errors.New("dirhash: filenames with newlines are not supported") - } - r, err := open(file) - if err != nil { - return "", err - } - hf := sha256.New() - _, err = io.Copy(hf, r) - r.Close() - if err != nil { - return "", err - } - fmt.Fprintf(h, "%x %s\n", hf.Sum(nil), file) - } - return "h1:" + base64.StdEncoding.EncodeToString(h.Sum(nil)), nil -} - -// HashDir returns the hash of the local file system directory dir, -// replacing the directory name itself with prefix in the file names -// used in the hash function. -func HashDir(dir, prefix string, hash Hash) (string, error) { - files, err := DirFiles(dir, prefix) - if err != nil { - return "", err - } - osOpen := func(name string) (io.ReadCloser, error) { - return os.Open(filepath.Join(dir, strings.TrimPrefix(name, prefix))) - } - return hash(files, osOpen) -} - -// DirFiles returns the list of files in the tree rooted at dir, -// replacing the directory name dir with prefix in each name. -// The resulting names always use forward slashes. -func DirFiles(dir, prefix string) ([]string, error) { - var files []string - dir = filepath.Clean(dir) - err := filepath.Walk(dir, func(file string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() { - return nil - } else if file == dir { - return fmt.Errorf("%s is not a directory", dir) - } - - rel := file - if dir != "." { - rel = file[len(dir)+1:] - } - f := filepath.Join(prefix, rel) - files = append(files, filepath.ToSlash(f)) - return nil - }) - if err != nil { - return nil, err - } - return files, nil -} - -// HashZip returns the hash of the file content in the named zip file. -// Only the file names and their contents are included in the hash: -// the exact zip file format encoding, compression method, -// per-file modification times, and other metadata are ignored. -func HashZip(zipfile string, hash Hash) (string, error) { - z, err := zip.OpenReader(zipfile) - if err != nil { - return "", err - } - defer z.Close() - var files []string - zfiles := make(map[string]*zip.File) - for _, file := range z.File { - files = append(files, file.Name) - zfiles[file.Name] = file - } - zipOpen := func(name string) (io.ReadCloser, error) { - f := zfiles[name] - if f == nil { - return nil, fmt.Errorf("file %q not found in zip", name) // should never happen - } - return f.Open() - } - return hash(files, zipOpen) -} diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS deleted file mode 100644 index 15167cd7..00000000 --- a/vendor/golang.org/x/oauth2/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. 
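As a rough consumer-side sketch of the dirhash helpers deleted above (the directory path and module version string are illustrative, not taken from this patch):

package example

import (
	"fmt"
	"log"

	"golang.org/x/mod/sumdb/dirhash"
)

// printModuleHash prints the "h1:" digest that go.sum would record for
// an extracted module directory.
func printModuleHash() {
	// HashDir walks the tree, hashes each file with SHA-256, and hashes
	// the resulting per-file summary again, as documented for Hash1.
	h, err := dirhash.HashDir("/path/to/example.com/m@v1.2.3", "example.com/m@v1.2.3", dirhash.Hash1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h) // h1:<base64 of SHA-256 over the per-file hash summary>
}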
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS deleted file mode 100644 index 1c4577e9..00000000 --- a/vendor/golang.org/x/oauth2/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s deleted file mode 100644 index db9171c2..00000000 --- a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc -// +build gc - -#include "textflag.h" - -// -// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go -// - -TEXT ·syscall6(SB),NOSPLIT,$0-88 - JMP syscall·syscall6(SB) - -TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 - JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go deleted file mode 100644 index 271055be..00000000 --- a/vendor/golang.org/x/sys/cpu/byteorder.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "runtime" -) - -// byteOrder is a subset of encoding/binary.ByteOrder. -type byteOrder interface { - Uint32([]byte) uint32 - Uint64([]byte) uint64 -} - -type littleEndian struct{} -type bigEndian struct{} - -func (littleEndian) Uint32(b []byte) uint32 { - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func (littleEndian) Uint64(b []byte) uint64 { - _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -func (bigEndian) Uint32(b []byte) uint32 { - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 -} - -func (bigEndian) Uint64(b []byte) uint64 { - _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | - uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 -} - -// hostByteOrder returns littleEndian on little-endian machines and -// bigEndian on big-endian machines. -func hostByteOrder() byteOrder { - switch runtime.GOARCH { - case "386", "amd64", "amd64p32", - "alpha", - "arm", "arm64", - "loong64", - "mipsle", "mips64le", "mips64p32le", - "nios2", - "ppc64le", - "riscv", "riscv64", - "sh": - return littleEndian{} - case "armbe", "arm64be", - "m68k", - "mips", "mips64", "mips64p32", - "ppc", "ppc64", - "s390", "s390x", - "shbe", - "sparc", "sparc64": - return bigEndian{} - } - panic("unknown architecture") -} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go deleted file mode 100644 index 83f112c4..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cpu implements processor feature detection for -// various CPU architectures. -package cpu - -import ( - "os" - "strings" -) - -// Initialized reports whether the CPU features were initialized. -// -// For some GOOS/GOARCH combinations initialization of the CPU features depends -// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm -// Initialized will report false if reading the file fails. -var Initialized bool - -// CacheLinePad is used to pad structs to avoid false sharing. -type CacheLinePad struct{ _ [cacheLineSize]byte } - -// X86 contains the supported CPU features of the -// current X86/AMD64 platform. If the current platform -// is not X86/AMD64 then all feature flags are false. -// -// X86 is padded to avoid false sharing. Further the HasAVX -// and HasAVX2 are only set if the OS supports XMM and YMM -// registers in addition to the CPUID feature bit being set. -var X86 struct { - _ CacheLinePad - HasAES bool // AES hardware implementation (AES NI) - HasADX bool // Multi-precision add-carry instruction extensions - HasAVX bool // Advanced vector extension - HasAVX2 bool // Advanced vector extension 2 - HasAVX512 bool // Advanced vector extension 512 - HasAVX512F bool // Advanced vector extension 512 Foundation Instructions - HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions - HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions - HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions Instructions - HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions - HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions - HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions - HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add - HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions - HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision - HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision - HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions - HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations - HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions - HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions - HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions - HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 - HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms - HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions - HasBMI1 bool // Bit manipulation instruction set 1 - HasBMI2 bool // Bit manipulation instruction set 2 - HasCX16 bool // Compare and exchange 16 Bytes - HasERMS bool // Enhanced REP for MOVSB and STOSB - HasFMA bool // Fused-multiply-add instructions - HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. - HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM - HasPOPCNT bool // Hamming weight instruction POPCNT. 
- HasRDRAND bool // RDRAND instruction (on-chip random number generator) - HasRDSEED bool // RDSEED instruction (on-chip random number generator) - HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) - HasSSE3 bool // Streaming SIMD extension 3 - HasSSSE3 bool // Supplemental streaming SIMD extension 3 - HasSSE41 bool // Streaming SIMD extension 4 and 4.1 - HasSSE42 bool // Streaming SIMD extension 4 and 4.2 - _ CacheLinePad -} - -// ARM64 contains the supported CPU features of the -// current ARMv8(aarch64) platform. If the current platform -// is not arm64 then all feature flags are false. -var ARM64 struct { - _ CacheLinePad - HasFP bool // Floating-point instruction set (always available) - HasASIMD bool // Advanced SIMD (always available) - HasEVTSTRM bool // Event stream support - HasAES bool // AES hardware implementation - HasPMULL bool // Polynomial multiplication instruction set - HasSHA1 bool // SHA1 hardware implementation - HasSHA2 bool // SHA2 hardware implementation - HasCRC32 bool // CRC32 hardware implementation - HasATOMICS bool // Atomic memory operation instruction set - HasFPHP bool // Half precision floating-point instruction set - HasASIMDHP bool // Advanced SIMD half precision instruction set - HasCPUID bool // CPUID identification scheme registers - HasASIMDRDM bool // Rounding double multiply add/subtract instruction set - HasJSCVT bool // Javascript conversion from floating-point to integer - HasFCMA bool // Floating-point multiplication and addition of complex numbers - HasLRCPC bool // Release Consistent processor consistent support - HasDCPOP bool // Persistent memory support - HasSHA3 bool // SHA3 hardware implementation - HasSM3 bool // SM3 hardware implementation - HasSM4 bool // SM4 hardware implementation - HasASIMDDP bool // Advanced SIMD double precision instruction set - HasSHA512 bool // SHA512 hardware implementation - HasSVE bool // Scalable Vector Extensions - HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 - _ CacheLinePad -} - -// ARM contains the supported CPU features of the current ARM (32-bit) platform. -// All feature flags are false if: -// 1. the current platform is not arm, or -// 2. the current operating system is not Linux. 
-var ARM struct { - _ CacheLinePad - HasSWP bool // SWP instruction support - HasHALF bool // Half-word load and store support - HasTHUMB bool // ARM Thumb instruction set - Has26BIT bool // Address space limited to 26-bits - HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support - HasFPA bool // Floating point arithmetic support - HasVFP bool // Vector floating point support - HasEDSP bool // DSP Extensions support - HasJAVA bool // Java instruction set - HasIWMMXT bool // Intel Wireless MMX technology support - HasCRUNCH bool // MaverickCrunch context switching and handling - HasTHUMBEE bool // Thumb EE instruction set - HasNEON bool // NEON instruction set - HasVFPv3 bool // Vector floating point version 3 support - HasVFPv3D16 bool // Vector floating point version 3 D8-D15 - HasTLS bool // Thread local storage support - HasVFPv4 bool // Vector floating point version 4 support - HasIDIVA bool // Integer divide instruction support in ARM mode - HasIDIVT bool // Integer divide instruction support in Thumb mode - HasVFPD32 bool // Vector floating point version 3 D15-D31 - HasLPAE bool // Large Physical Address Extensions - HasEVTSTRM bool // Event stream support - HasAES bool // AES hardware implementation - HasPMULL bool // Polynomial multiplication instruction set - HasSHA1 bool // SHA1 hardware implementation - HasSHA2 bool // SHA2 hardware implementation - HasCRC32 bool // CRC32 hardware implementation - _ CacheLinePad -} - -// MIPS64X contains the supported CPU features of the current mips64/mips64le -// platforms. If the current platform is not mips64/mips64le or the current -// operating system is not Linux then all feature flags are false. -var MIPS64X struct { - _ CacheLinePad - HasMSA bool // MIPS SIMD architecture - _ CacheLinePad -} - -// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. -// If the current platform is not ppc64/ppc64le then all feature flags are false. -// -// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, -// since there are no optional categories. There are some exceptions that also -// require kernel support to work (DARN, SCV), so there are feature bits for -// those as well. The struct is padded to avoid false sharing. -var PPC64 struct { - _ CacheLinePad - HasDARN bool // Hardware random number generator (requires kernel enablement) - HasSCV bool // Syscall vectored (requires kernel enablement) - IsPOWER8 bool // ISA v2.07 (POWER8) - IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8 - _ CacheLinePad -} - -// S390X contains the supported CPU features of the current IBM Z -// (s390x) platform. If the current platform is not IBM Z then all -// feature flags are false. -// -// S390X is padded to avoid false sharing. Further HasVX is only set -// if the OS supports vector registers in addition to the STFLE -// feature bit being set. 
-var S390X struct { - _ CacheLinePad - HasZARCH bool // z/Architecture mode is active [mandatory] - HasSTFLE bool // store facility list extended - HasLDISP bool // long (20-bit) displacements - HasEIMM bool // 32-bit immediates - HasDFP bool // decimal floating point - HasETF3EH bool // ETF-3 enhanced - HasMSA bool // message security assist (CPACF) - HasAES bool // KM-AES{128,192,256} functions - HasAESCBC bool // KMC-AES{128,192,256} functions - HasAESCTR bool // KMCTR-AES{128,192,256} functions - HasAESGCM bool // KMA-GCM-AES{128,192,256} functions - HasGHASH bool // KIMD-GHASH function - HasSHA1 bool // K{I,L}MD-SHA-1 functions - HasSHA256 bool // K{I,L}MD-SHA-256 functions - HasSHA512 bool // K{I,L}MD-SHA-512 functions - HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions - HasVX bool // vector facility - HasVXE bool // vector-enhancements facility 1 - _ CacheLinePad -} - -func init() { - archInit() - initOptions() - processOptions() -} - -// options contains the cpu debug options that can be used in GODEBUG. -// Options are arch dependent and are added by the arch specific initOptions functions. -// Features that are mandatory for the specific GOARCH should have the Required field set -// (e.g. SSE2 on amd64). -var options []option - -// Option names should be lower case. e.g. avx instead of AVX. -type option struct { - Name string - Feature *bool - Specified bool // whether feature value was specified in GODEBUG - Enable bool // whether feature should be enabled - Required bool // whether feature is mandatory and can not be disabled -} - -func processOptions() { - env := os.Getenv("GODEBUG") -field: - for env != "" { - field := "" - i := strings.IndexByte(env, ',') - if i < 0 { - field, env = env, "" - } else { - field, env = env[:i], env[i+1:] - } - if len(field) < 4 || field[:4] != "cpu." { - continue - } - i = strings.IndexByte(field, '=') - if i < 0 { - print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n") - continue - } - key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on" - - var enable bool - switch value { - case "on": - enable = true - case "off": - enable = false - default: - print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n") - continue field - } - - if key == "all" { - for i := range options { - options[i].Specified = true - options[i].Enable = enable || options[i].Required - } - continue field - } - - for i := range options { - if options[i].Name == key { - options[i].Specified = true - options[i].Enable = enable - continue field - } - } - - print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n") - } - - for _, o := range options { - if !o.Specified { - continue - } - - if o.Enable && !*o.Feature { - print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n") - continue - } - - if !o.Enable && o.Required { - print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n") - continue - } - - *o.Feature = o.Enable - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go deleted file mode 100644 index 8aaeef54..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_aix.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build aix -// +build aix - -package cpu - -const ( - // getsystemcfg constants - _SC_IMPL = 2 - _IMPL_POWER8 = 0x10000 - _IMPL_POWER9 = 0x20000 -) - -func archInit() { - impl := getsystemcfg(_SC_IMPL) - if impl&_IMPL_POWER8 != 0 { - PPC64.IsPOWER8 = true - } - if impl&_IMPL_POWER9 != 0 { - PPC64.IsPOWER8 = true - PPC64.IsPOWER9 = true - } - - Initialized = true -} - -func getsystemcfg(label int) (n uint64) { - r0, _ := callgetsystemcfg(label) - n = uint64(r0) - return -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go deleted file mode 100644 index 301b752e..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_arm.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -const cacheLineSize = 32 - -// HWCAP/HWCAP2 bits. -// These are specific to Linux. -const ( - hwcap_SWP = 1 << 0 - hwcap_HALF = 1 << 1 - hwcap_THUMB = 1 << 2 - hwcap_26BIT = 1 << 3 - hwcap_FAST_MULT = 1 << 4 - hwcap_FPA = 1 << 5 - hwcap_VFP = 1 << 6 - hwcap_EDSP = 1 << 7 - hwcap_JAVA = 1 << 8 - hwcap_IWMMXT = 1 << 9 - hwcap_CRUNCH = 1 << 10 - hwcap_THUMBEE = 1 << 11 - hwcap_NEON = 1 << 12 - hwcap_VFPv3 = 1 << 13 - hwcap_VFPv3D16 = 1 << 14 - hwcap_TLS = 1 << 15 - hwcap_VFPv4 = 1 << 16 - hwcap_IDIVA = 1 << 17 - hwcap_IDIVT = 1 << 18 - hwcap_VFPD32 = 1 << 19 - hwcap_LPAE = 1 << 20 - hwcap_EVTSTRM = 1 << 21 - - hwcap2_AES = 1 << 0 - hwcap2_PMULL = 1 << 1 - hwcap2_SHA1 = 1 << 2 - hwcap2_SHA2 = 1 << 3 - hwcap2_CRC32 = 1 << 4 -) - -func initOptions() { - options = []option{ - {Name: "pmull", Feature: &ARM.HasPMULL}, - {Name: "sha1", Feature: &ARM.HasSHA1}, - {Name: "sha2", Feature: &ARM.HasSHA2}, - {Name: "swp", Feature: &ARM.HasSWP}, - {Name: "thumb", Feature: &ARM.HasTHUMB}, - {Name: "thumbee", Feature: &ARM.HasTHUMBEE}, - {Name: "tls", Feature: &ARM.HasTLS}, - {Name: "vfp", Feature: &ARM.HasVFP}, - {Name: "vfpd32", Feature: &ARM.HasVFPD32}, - {Name: "vfpv3", Feature: &ARM.HasVFPv3}, - {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16}, - {Name: "vfpv4", Feature: &ARM.HasVFPv4}, - {Name: "half", Feature: &ARM.HasHALF}, - {Name: "26bit", Feature: &ARM.Has26BIT}, - {Name: "fastmul", Feature: &ARM.HasFASTMUL}, - {Name: "fpa", Feature: &ARM.HasFPA}, - {Name: "edsp", Feature: &ARM.HasEDSP}, - {Name: "java", Feature: &ARM.HasJAVA}, - {Name: "iwmmxt", Feature: &ARM.HasIWMMXT}, - {Name: "crunch", Feature: &ARM.HasCRUNCH}, - {Name: "neon", Feature: &ARM.HasNEON}, - {Name: "idivt", Feature: &ARM.HasIDIVT}, - {Name: "idiva", Feature: &ARM.HasIDIVA}, - {Name: "lpae", Feature: &ARM.HasLPAE}, - {Name: "evtstrm", Feature: &ARM.HasEVTSTRM}, - {Name: "aes", Feature: &ARM.HasAES}, - {Name: "crc32", Feature: &ARM.HasCRC32}, - } - -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go deleted file mode 100644 index f3eb993b..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import "runtime" - -// cacheLineSize is used to prevent false sharing of cache lines. -// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size. -// It doesn't cost much and is much more future-proof. 
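// As a sketch of the padding in practice (field names illustrative):
//
//	type shardedCounters struct {
//		_      CacheLinePad
//		hits   uint64 // written by one goroutine
//		_      CacheLinePad
//		misses uint64 // written by another
//	}
//
// the pads keep the two hot fields on separate cache lines, so stores
// to one do not invalidate the line holding the other.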
-const cacheLineSize = 128 - -func initOptions() { - options = []option{ - {Name: "fp", Feature: &ARM64.HasFP}, - {Name: "asimd", Feature: &ARM64.HasASIMD}, - {Name: "evstrm", Feature: &ARM64.HasEVTSTRM}, - {Name: "aes", Feature: &ARM64.HasAES}, - {Name: "fphp", Feature: &ARM64.HasFPHP}, - {Name: "jscvt", Feature: &ARM64.HasJSCVT}, - {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, - {Name: "pmull", Feature: &ARM64.HasPMULL}, - {Name: "sha1", Feature: &ARM64.HasSHA1}, - {Name: "sha2", Feature: &ARM64.HasSHA2}, - {Name: "sha3", Feature: &ARM64.HasSHA3}, - {Name: "sha512", Feature: &ARM64.HasSHA512}, - {Name: "sm3", Feature: &ARM64.HasSM3}, - {Name: "sm4", Feature: &ARM64.HasSM4}, - {Name: "sve", Feature: &ARM64.HasSVE}, - {Name: "crc32", Feature: &ARM64.HasCRC32}, - {Name: "atomics", Feature: &ARM64.HasATOMICS}, - {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, - {Name: "cpuid", Feature: &ARM64.HasCPUID}, - {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM}, - {Name: "fcma", Feature: &ARM64.HasFCMA}, - {Name: "dcpop", Feature: &ARM64.HasDCPOP}, - {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, - {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, - } -} - -func archInit() { - switch runtime.GOOS { - case "freebsd": - readARM64Registers() - case "linux", "netbsd", "openbsd": - doinit() - default: - // Many platforms don't seem to allow reading these registers. - setMinimalFeatures() - } -} - -// setMinimalFeatures fakes the minimal ARM64 features expected by -// TestARM64minimalFeatures. -func setMinimalFeatures() { - ARM64.HasASIMD = true - ARM64.HasFP = true -} - -func readARM64Registers() { - Initialized = true - - parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) -} - -func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { - // ID_AA64ISAR0_EL1 - switch extractBits(isar0, 4, 7) { - case 1: - ARM64.HasAES = true - case 2: - ARM64.HasAES = true - ARM64.HasPMULL = true - } - - switch extractBits(isar0, 8, 11) { - case 1: - ARM64.HasSHA1 = true - } - - switch extractBits(isar0, 12, 15) { - case 1: - ARM64.HasSHA2 = true - case 2: - ARM64.HasSHA2 = true - ARM64.HasSHA512 = true - } - - switch extractBits(isar0, 16, 19) { - case 1: - ARM64.HasCRC32 = true - } - - switch extractBits(isar0, 20, 23) { - case 2: - ARM64.HasATOMICS = true - } - - switch extractBits(isar0, 28, 31) { - case 1: - ARM64.HasASIMDRDM = true - } - - switch extractBits(isar0, 32, 35) { - case 1: - ARM64.HasSHA3 = true - } - - switch extractBits(isar0, 36, 39) { - case 1: - ARM64.HasSM3 = true - } - - switch extractBits(isar0, 40, 43) { - case 1: - ARM64.HasSM4 = true - } - - switch extractBits(isar0, 44, 47) { - case 1: - ARM64.HasASIMDDP = true - } - - // ID_AA64ISAR1_EL1 - switch extractBits(isar1, 0, 3) { - case 1: - ARM64.HasDCPOP = true - } - - switch extractBits(isar1, 12, 15) { - case 1: - ARM64.HasJSCVT = true - } - - switch extractBits(isar1, 16, 19) { - case 1: - ARM64.HasFCMA = true - } - - switch extractBits(isar1, 20, 23) { - case 1: - ARM64.HasLRCPC = true - } - - // ID_AA64PFR0_EL1 - switch extractBits(pfr0, 16, 19) { - case 0: - ARM64.HasFP = true - case 1: - ARM64.HasFP = true - ARM64.HasFPHP = true - } - - switch extractBits(pfr0, 20, 23) { - case 0: - ARM64.HasASIMD = true - case 1: - ARM64.HasASIMD = true - ARM64.HasASIMDHP = true - } - - switch extractBits(pfr0, 32, 35) { - case 1: - ARM64.HasSVE = true - } -} - -func extractBits(data uint64, start, end uint) uint { - return (uint)(data>>start) & ((1 << (end - start + 1)) - 1) -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s 
b/vendor/golang.org/x/sys/cpu/cpu_arm64.s deleted file mode 100644 index c61f95a0..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc -// +build gc - -#include "textflag.h" - -// func getisar0() uint64 -TEXT ·getisar0(SB),NOSPLIT,$0-8 - // get Instruction Set Attributes 0 into x0 - // mrs x0, ID_AA64ISAR0_EL1 = d5380600 - WORD $0xd5380600 - MOVD R0, ret+0(FP) - RET - -// func getisar1() uint64 -TEXT ·getisar1(SB),NOSPLIT,$0-8 - // get Instruction Set Attributes 1 into x0 - // mrs x0, ID_AA64ISAR1_EL1 = d5380620 - WORD $0xd5380620 - MOVD R0, ret+0(FP) - RET - -// func getpfr0() uint64 -TEXT ·getpfr0(SB),NOSPLIT,$0-8 - // get Processor Feature Register 0 into x0 - // mrs x0, ID_AA64PFR0_EL1 = d5380400 - WORD $0xd5380400 - MOVD R0, ret+0(FP) - RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go deleted file mode 100644 index ccf542a7..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc -// +build gc - -package cpu - -func getisar0() uint64 -func getisar1() uint64 -func getpfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go deleted file mode 100644 index 0af2f248..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc -// +build gc - -package cpu - -// haveAsmFunctions reports whether the other functions in this file can -// be safely called. -func haveAsmFunctions() bool { return true } - -// The following feature detection functions are defined in cpu_s390x.s. -// They are likely to be expensive to call so the results should be cached. -func stfle() facilityList -func kmQuery() queryResult -func kmcQuery() queryResult -func kmctrQuery() queryResult -func kmaQuery() queryResult -func kimdQuery() queryResult -func klmdQuery() queryResult diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go deleted file mode 100644 index fa7cdb9b..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (386 || amd64 || amd64p32) && gc -// +build 386 amd64 amd64p32 -// +build gc - -package cpu - -// cpuid is implemented in cpu_x86.s for gc compiler -// and in cpu_gccgo.c for gccgo. -func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) - -// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler -// and in cpu_gccgo.c for gccgo. -func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go deleted file mode 100644 index 2aff3189..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gccgo -// +build gccgo - -package cpu - -func getisar0() uint64 { return 0 } -func getisar1() uint64 { return 0 } -func getpfr0() uint64 { return 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go deleted file mode 100644 index 4bfbda61..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gccgo -// +build gccgo - -package cpu - -// haveAsmFunctions reports whether the other functions in this file can -// be safely called. -func haveAsmFunctions() bool { return false } - -// TODO(mundaym): the following feature detection functions are currently -// stubs. See https://golang.org/cl/162887 for how to fix this. -// They are likely to be expensive to call so the results should be cached. -func stfle() facilityList { panic("not implemented for gccgo") } -func kmQuery() queryResult { panic("not implemented for gccgo") } -func kmcQuery() queryResult { panic("not implemented for gccgo") } -func kmctrQuery() queryResult { panic("not implemented for gccgo") } -func kmaQuery() queryResult { panic("not implemented for gccgo") } -func kimdQuery() queryResult { panic("not implemented for gccgo") } -func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c deleted file mode 100644 index 6cc73109..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (386 || amd64 || amd64p32) && gccgo -// +build 386 amd64 amd64p32 -// +build gccgo - -#include <cpuid.h> -#include <stdint.h> -#include <x86intrin.h> - -// Need to wrap __get_cpuid_count because it's declared as static. -int -gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, - uint32_t *eax, uint32_t *ebx, - uint32_t *ecx, uint32_t *edx) -{ - return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); -} - -#pragma GCC diagnostic ignored "-Wunknown-pragmas" -#pragma GCC push_options -#pragma GCC target("xsave") -#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function) - -// xgetbv reads the contents of an XCR (Extended Control Register) -// specified in the ECX register into registers EDX:EAX. -// Currently, the only supported value for XCR is 0. -void -gccgoXgetbv(uint32_t *eax, uint32_t *edx) -{ - uint64_t v = _xgetbv(0); - *eax = v & 0xffffffff; - *edx = v >> 32; -} - -#pragma clang attribute pop -#pragma GCC pop_options diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go deleted file mode 100644 index 863d415a..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
- -//go:build (386 || amd64 || amd64p32) && gccgo -// +build 386 amd64 amd64p32 -// +build gccgo - -package cpu - -//extern gccgoGetCpuidCount -func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) - -func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { - var a, b, c, d uint32 - gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) - return a, b, c, d -} - -//extern gccgoXgetbv -func gccgoXgetbv(eax, edx *uint32) - -func xgetbv() (eax, edx uint32) { - var a, d uint32 - gccgoXgetbv(&a, &d) - return a, d -} - -// gccgo doesn't build on Darwin, per: -// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 -func darwinSupportsAVX512() bool { - return false -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go deleted file mode 100644 index 159a686f..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !386 && !amd64 && !amd64p32 && !arm64 -// +build !386,!amd64,!amd64p32,!arm64 - -package cpu - -func archInit() { - if err := readHWCAP(); err != nil { - return - } - doinit() - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go deleted file mode 100644 index 2057006d..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -func doinit() { - ARM.HasSWP = isSet(hwCap, hwcap_SWP) - ARM.HasHALF = isSet(hwCap, hwcap_HALF) - ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) - ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) - ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) - ARM.HasFPA = isSet(hwCap, hwcap_FPA) - ARM.HasVFP = isSet(hwCap, hwcap_VFP) - ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) - ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) - ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) - ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) - ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) - ARM.HasNEON = isSet(hwCap, hwcap_NEON) - ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) - ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) - ARM.HasTLS = isSet(hwCap, hwcap_TLS) - ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) - ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) - ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) - ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) - ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) - ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) - ARM.HasAES = isSet(hwCap2, hwcap2_AES) - ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) - ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) - ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) - ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go deleted file mode 100644 index a968b80f..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "strings" - "syscall" -) - -// HWCAP/HWCAP2 bits. These are exposed by Linux. 
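// The kernel publishes these words in the ELF auxiliary vector
// (/proc/self/auxv) under AT_HWCAP (16) and AT_HWCAP2 (26); readHWCAP
// elsewhere in this package loads them into hwCap and hwCap2.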
-const ( - hwcap_FP = 1 << 0 - hwcap_ASIMD = 1 << 1 - hwcap_EVTSTRM = 1 << 2 - hwcap_AES = 1 << 3 - hwcap_PMULL = 1 << 4 - hwcap_SHA1 = 1 << 5 - hwcap_SHA2 = 1 << 6 - hwcap_CRC32 = 1 << 7 - hwcap_ATOMICS = 1 << 8 - hwcap_FPHP = 1 << 9 - hwcap_ASIMDHP = 1 << 10 - hwcap_CPUID = 1 << 11 - hwcap_ASIMDRDM = 1 << 12 - hwcap_JSCVT = 1 << 13 - hwcap_FCMA = 1 << 14 - hwcap_LRCPC = 1 << 15 - hwcap_DCPOP = 1 << 16 - hwcap_SHA3 = 1 << 17 - hwcap_SM3 = 1 << 18 - hwcap_SM4 = 1 << 19 - hwcap_ASIMDDP = 1 << 20 - hwcap_SHA512 = 1 << 21 - hwcap_SVE = 1 << 22 - hwcap_ASIMDFHM = 1 << 23 -) - -// linuxKernelCanEmulateCPUID reports whether we're running -// on Linux 4.11+. Ideally we'd like to ask the question about -// whether the current kernel contains -// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2 -// but the version number will have to do. -func linuxKernelCanEmulateCPUID() bool { - var un syscall.Utsname - syscall.Uname(&un) - var sb strings.Builder - for _, b := range un.Release[:] { - if b == 0 { - break - } - sb.WriteByte(byte(b)) - } - major, minor, _, ok := parseRelease(sb.String()) - return ok && (major > 4 || major == 4 && minor >= 11) -} - -func doinit() { - if err := readHWCAP(); err != nil { - // We failed to read /proc/self/auxv. This can happen if the binary has - // been given extra capabilities(7) with /bin/setcap. - // - // When this happens, we have two options. If the Linux kernel is new - // enough (4.11+), we can read the arm64 registers directly which'll - // trap into the kernel and then return back to userspace. - // - // But on older kernels, such as Linux 4.4.180 as used on many Synology - // devices, calling readARM64Registers (specifically getisar0) will - // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo - // instead. - // - // See golang/go#57336. - if linuxKernelCanEmulateCPUID() { - readARM64Registers() - } else { - readLinuxProcCPUInfo() - } - return - } - - // HWCAP feature bits - ARM64.HasFP = isSet(hwCap, hwcap_FP) - ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) - ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) - ARM64.HasAES = isSet(hwCap, hwcap_AES) - ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) - ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) - ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) - ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) - ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) - ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) - ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) - ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) - ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) - ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) - ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) - ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) - ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) - ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) - ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) - ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) - ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) - ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) - ARM64.HasSVE = isSet(hwCap, hwcap_SVE) - ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go deleted file mode 100644 index 6000db4c..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && (mips64 || mips64le) -// +build linux -// +build mips64 mips64le - -package cpu - -// HWCAP bits. These are exposed by the Linux kernel 5.4. -const ( - // CPU features - hwcap_MIPS_MSA = 1 << 1 -) - -func doinit() { - // HWCAP feature bits - MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go deleted file mode 100644 index f4992b1a..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x -// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x - -package cpu - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go deleted file mode 100644 index 021356d6..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le - -package cpu - -// HWCAP/HWCAP2 bits. These are exposed by the kernel. -const ( - // ISA Level - _PPC_FEATURE2_ARCH_2_07 = 0x80000000 - _PPC_FEATURE2_ARCH_3_00 = 0x00800000 - - // CPU features - _PPC_FEATURE2_DARN = 0x00200000 - _PPC_FEATURE2_SCV = 0x00100000 -) - -func doinit() { - // HWCAP2 feature bits - PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) - PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) - PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) - PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go deleted file mode 100644 index 1517ac61..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package cpu - -const ( - // bit mask values from /usr/include/bits/hwcap.h - hwcap_ZARCH = 2 - hwcap_STFLE = 4 - hwcap_MSA = 8 - hwcap_LDISP = 16 - hwcap_EIMM = 32 - hwcap_DFP = 64 - hwcap_ETF3EH = 256 - hwcap_VX = 2048 - hwcap_VXE = 8192 -) - -func initS390Xbase() { - // test HWCAP bit vector - has := func(featureMask uint) bool { - return hwCap&featureMask == featureMask - } - - // mandatory - S390X.HasZARCH = has(hwcap_ZARCH) - - // optional - S390X.HasSTFLE = has(hwcap_STFLE) - S390X.HasLDISP = has(hwcap_LDISP) - S390X.HasEIMM = has(hwcap_EIMM) - S390X.HasETF3EH = has(hwcap_ETF3EH) - S390X.HasDFP = has(hwcap_DFP) - S390X.HasMSA = has(hwcap_MSA) - S390X.HasVX = has(hwcap_VX) - if S390X.HasVX { - S390X.HasVXE = has(hwcap_VXE) - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go deleted file mode 100644 index 0f57b05b..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_loong64.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build loong64 -// +build loong64 - -package cpu - -const cacheLineSize = 64 - -func initOptions() { -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go deleted file mode 100644 index f4063c66..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build mips64 || mips64le -// +build mips64 mips64le - -package cpu - -const cacheLineSize = 32 - -func initOptions() { - options = []option{ - {Name: "msa", Feature: &MIPS64X.HasMSA}, - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go deleted file mode 100644 index 07c4e36d..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build mips || mipsle -// +build mips mipsle - -package cpu - -const cacheLineSize = 32 - -func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go deleted file mode 100644 index ebfb3fc8..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "syscall" - "unsafe" -) - -// Minimal copy of functionality from x/sys/unix so the cpu package can call -// sysctl without depending on x/sys/unix. 
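For contrast with the hand-rolled plumbing that follows, code that is permitted to depend on x/sys/unix can issue the same query in one call: SysctlRaw resolves the node name and performs the syscall internally. A sketch, assuming a NetBSD host:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Name-to-MIB resolution plus the sysctl itself, in one step.
        raw, err := unix.SysctlRaw("machdep.cpu0.cpu_id")
        if err != nil {
            fmt.Println("sysctl failed:", err)
            return
        }
        fmt.Printf("machdep.cpu0.cpu_id: %d raw bytes\n", len(raw))
    }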
- -const ( - _CTL_QUERY = -2 - - _SYSCTL_VERS_1 = 0x1000000 -) - -var _zero uintptr - -func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6( - syscall.SYS___SYSCTL, - uintptr(_p0), - uintptr(len(mib)), - uintptr(unsafe.Pointer(old)), - uintptr(unsafe.Pointer(oldlen)), - uintptr(unsafe.Pointer(new)), - uintptr(newlen)) - if errno != 0 { - return errno - } - return nil -} - -type sysctlNode struct { - Flags uint32 - Num int32 - Name [32]int8 - Ver uint32 - __rsvd uint32 - Un [16]byte - _sysctl_size [8]byte - _sysctl_func [8]byte - _sysctl_parent [8]byte - _sysctl_desc [8]byte -} - -func sysctlNodes(mib []int32) ([]sysctlNode, error) { - var olen uintptr - - // Get a list of all sysctl nodes below the given MIB by performing - // a sysctl for the given MIB with CTL_QUERY appended. - mib = append(mib, _CTL_QUERY) - qnode := sysctlNode{Flags: _SYSCTL_VERS_1} - qp := (*byte)(unsafe.Pointer(&qnode)) - sz := unsafe.Sizeof(qnode) - if err := sysctl(mib, nil, &olen, qp, sz); err != nil { - return nil, err - } - - // Now that we know the size, get the actual nodes. - nodes := make([]sysctlNode, olen/sz) - np := (*byte)(unsafe.Pointer(&nodes[0])) - if err := sysctl(mib, np, &olen, qp, sz); err != nil { - return nil, err - } - - return nodes, nil -} - -func nametomib(name string) ([]int32, error) { - // Split name into components. - var parts []string - last := 0 - for i := 0; i < len(name); i++ { - if name[i] == '.' { - parts = append(parts, name[last:i]) - last = i + 1 - } - } - parts = append(parts, name[last:]) - - mib := []int32{} - // Discover the nodes and construct the MIB OID. 
-	for partno, part := range parts {
-		nodes, err := sysctlNodes(mib)
-		if err != nil {
-			return nil, err
-		}
-		for _, node := range nodes {
-			n := make([]byte, 0)
-			for i := range node.Name {
-				if node.Name[i] != 0 {
-					n = append(n, byte(node.Name[i]))
-				}
-			}
-			if string(n) == part {
-				mib = append(mib, int32(node.Num))
-				break
-			}
-		}
-		if len(mib) != partno+1 {
-			return nil, err
-		}
-	}
-
-	return mib, nil
-}
-
-// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's <aarch64/armreg.h>
-type aarch64SysctlCPUID struct {
-	midr      uint64 /* Main ID Register */
-	revidr    uint64 /* Revision ID Register */
-	mpidr     uint64 /* Multiprocessor Affinity Register */
-	aa64dfr0  uint64 /* A64 Debug Feature Register 0 */
-	aa64dfr1  uint64 /* A64 Debug Feature Register 1 */
-	aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */
-	aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */
-	aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */
-	aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */
-	aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */
-	aa64pfr0  uint64 /* A64 Processor Feature Register 0 */
-	aa64pfr1  uint64 /* A64 Processor Feature Register 1 */
-	aa64zfr0  uint64 /* A64 SVE Feature ID Register 0 */
-	mvfr0     uint32 /* Media and VFP Feature Register 0 */
-	mvfr1     uint32 /* Media and VFP Feature Register 1 */
-	mvfr2     uint32 /* Media and VFP Feature Register 2 */
-	pad       uint32
-	clidr     uint64 /* Cache Level ID Register */
-	ctr       uint64 /* Cache Type Register */
-}
-
-func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) {
-	mib, err := nametomib(name)
-	if err != nil {
-		return nil, err
-	}
-
-	out := aarch64SysctlCPUID{}
-	n := unsafe.Sizeof(out)
-	_, _, errno := syscall.Syscall6(
-		syscall.SYS___SYSCTL,
-		uintptr(unsafe.Pointer(&mib[0])),
-		uintptr(len(mib)),
-		uintptr(unsafe.Pointer(&out)),
-		uintptr(unsafe.Pointer(&n)),
-		uintptr(0),
-		uintptr(0))
-	if errno != 0 {
-		return nil, errno
-	}
-	return &out, nil
-}
-
-func doinit() {
-	cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id")
-	if err != nil {
-		setMinimalFeatures()
-		return
-	}
-	parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0)
-
-	Initialized = true
-}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
deleted file mode 100644
index 85b64d5c..00000000
--- a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-import (
-	"syscall"
-	"unsafe"
-)
-
-// Minimal copy of functionality from x/sys/unix so the cpu package can call
-// sysctl without depending on x/sys/unix.
-
-const (
-	// From OpenBSD's sys/sysctl.h.
-	_CTL_MACHDEP = 7
-
-	// From OpenBSD's machine/cpu.h.
- _CPU_ID_AA64ISAR0 = 2 - _CPU_ID_AA64ISAR1 = 3 -) - -// Implemented in the runtime package (runtime/sys_openbsd3.go) -func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) - -//go:linkname syscall_syscall6 syscall.syscall6 - -func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if errno != 0 { - return errno - } - return nil -} - -var libc_sysctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" - -func sysctlUint64(mib []uint32) (uint64, bool) { - var out uint64 - nout := unsafe.Sizeof(out) - if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil { - return 0, false - } - return out, true -} - -func doinit() { - setMinimalFeatures() - - // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl. - isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0}) - if !ok { - return - } - isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1}) - if !ok { - return - } - parseARM64SystemRegisters(isar0, isar1, 0) - - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s deleted file mode 100644 index 054ba05d..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sysctl(SB) - -GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 -DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go deleted file mode 100644 index d7b4fb4c..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !linux && arm -// +build !linux,arm - -package cpu - -func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go deleted file mode 100644 index f3cde129..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !linux && !netbsd && !openbsd && arm64 -// +build !linux,!netbsd,!openbsd,arm64 - -package cpu - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go deleted file mode 100644 index 0dafe964..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !linux && (mips64 || mips64le) -// +build !linux -// +build mips64 mips64le - -package cpu - -func archInit() { - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go deleted file mode 100644 index 060d46b6..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !aix && !linux && (ppc64 || ppc64le) -// +build !aix -// +build !linux -// +build ppc64 ppc64le - -package cpu - -func archInit() { - PPC64.IsPOWER8 = true - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go deleted file mode 100644 index dd10eb79..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !linux && riscv64 -// +build !linux,riscv64 - -package cpu - -func archInit() { - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go deleted file mode 100644 index 4e8acd16..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build ppc64 || ppc64le -// +build ppc64 ppc64le - -package cpu - -const cacheLineSize = 128 - -func initOptions() { - options = []option{ - {Name: "darn", Feature: &PPC64.HasDARN}, - {Name: "scv", Feature: &PPC64.HasSCV}, - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go deleted file mode 100644 index bd6c128a..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build riscv64 -// +build riscv64 - -package cpu - -const cacheLineSize = 32 - -func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go deleted file mode 100644 index 5881b883..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_s390x.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
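The per-architecture cacheLineSize constants deleted in these files back the package's exported CacheLinePad type, whose usual job is to keep hot fields on separate cache lines. A small usage sketch (the printed size varies by GOARCH):

    package main

    import (
        "fmt"
        "unsafe"

        "golang.org/x/sys/cpu"
    )

    // counters separates two independently updated fields so concurrent
    // writers do not false-share a cache line.
    type counters struct {
        hits   uint64
        _      cpu.CacheLinePad
        misses uint64
    }

    func main() {
        fmt.Println("sizeof(counters):", unsafe.Sizeof(counters{}))
    }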
- -package cpu - -const cacheLineSize = 256 - -func initOptions() { - options = []option{ - {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, - {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, - {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, - {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, - {Name: "dfp", Feature: &S390X.HasDFP}, - {Name: "etf3eh", Feature: &S390X.HasETF3EH}, - {Name: "msa", Feature: &S390X.HasMSA}, - {Name: "aes", Feature: &S390X.HasAES}, - {Name: "aescbc", Feature: &S390X.HasAESCBC}, - {Name: "aesctr", Feature: &S390X.HasAESCTR}, - {Name: "aesgcm", Feature: &S390X.HasAESGCM}, - {Name: "ghash", Feature: &S390X.HasGHASH}, - {Name: "sha1", Feature: &S390X.HasSHA1}, - {Name: "sha256", Feature: &S390X.HasSHA256}, - {Name: "sha3", Feature: &S390X.HasSHA3}, - {Name: "sha512", Feature: &S390X.HasSHA512}, - {Name: "vx", Feature: &S390X.HasVX}, - {Name: "vxe", Feature: &S390X.HasVXE}, - } -} - -// bitIsSet reports whether the bit at index is set. The bit index -// is in big endian order, so bit index 0 is the leftmost bit. -func bitIsSet(bits []uint64, index uint) bool { - return bits[index/64]&((1<<63)>>(index%64)) != 0 -} - -// facility is a bit index for the named facility. -type facility uint8 - -const ( - // mandatory facilities - zarch facility = 1 // z architecture mode is active - stflef facility = 7 // store-facility-list-extended - ldisp facility = 18 // long-displacement - eimm facility = 21 // extended-immediate - - // miscellaneous facilities - dfp facility = 42 // decimal-floating-point - etf3eh facility = 30 // extended-translation 3 enhancement - - // cryptography facilities - msa facility = 17 // message-security-assist - msa3 facility = 76 // message-security-assist extension 3 - msa4 facility = 77 // message-security-assist extension 4 - msa5 facility = 57 // message-security-assist extension 5 - msa8 facility = 146 // message-security-assist extension 8 - msa9 facility = 155 // message-security-assist extension 9 - - // vector facilities - vx facility = 129 // vector facility - vxe facility = 135 // vector-enhancements 1 - vxe2 facility = 148 // vector-enhancements 2 -) - -// facilityList contains the result of an STFLE call. -// Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type facilityList struct { - bits [4]uint64 -} - -// Has reports whether the given facilities are present. -func (s *facilityList) Has(fs ...facility) bool { - if len(fs) == 0 { - panic("no facility bits provided") - } - for _, f := range fs { - if !bitIsSet(s.bits[:], uint(f)) { - return false - } - } - return true -} - -// function is the code for the named cryptographic function. -type function uint8 - -const ( - // KM{,A,C,CTR} function codes - aes128 function = 18 // AES-128 - aes192 function = 19 // AES-192 - aes256 function = 20 // AES-256 - - // K{I,L}MD function codes - sha1 function = 1 // SHA-1 - sha256 function = 2 // SHA-256 - sha512 function = 3 // SHA-512 - sha3_224 function = 32 // SHA3-224 - sha3_256 function = 33 // SHA3-256 - sha3_384 function = 34 // SHA3-384 - sha3_512 function = 35 // SHA3-512 - shake128 function = 36 // SHAKE-128 - shake256 function = 37 // SHAKE-256 - - // KLMD function codes - ghash function = 65 // GHASH -) - -// queryResult contains the result of a Query function -// call. Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type queryResult struct { - bits [2]uint64 -} - -// Has reports whether the given functions are present. 
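The big-endian bit numbering used by bitIsSet above is easy to get backwards: facility 0 is the most significant bit of the first doubleword, matching how STFLE stores its result. A quick self-contained check of the convention:

    package main

    import "fmt"

    // bitIsSet mirrors the helper above: index 0 addresses the MSB.
    func bitIsSet(bits []uint64, index uint) bool {
        return bits[index/64]&((1<<63)>>(index%64)) != 0
    }

    func main() {
        bits := []uint64{1 << 63, 0, 0, 0} // only facility 0 present
        fmt.Println(bitIsSet(bits, 0))     // true
        fmt.Println(bitIsSet(bits, 1))     // false
    }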
-func (q *queryResult) Has(fns ...function) bool { - if len(fns) == 0 { - panic("no function codes provided") - } - for _, f := range fns { - if !bitIsSet(q.bits[:], uint(f)) { - return false - } - } - return true -} - -func doinit() { - initS390Xbase() - - // We need implementations of stfle, km and so on - // to detect cryptographic features. - if !haveAsmFunctions() { - return - } - - // optional cryptographic functions - if S390X.HasMSA { - aes := []function{aes128, aes192, aes256} - - // cipher message - km, kmc := kmQuery(), kmcQuery() - S390X.HasAES = km.Has(aes...) - S390X.HasAESCBC = kmc.Has(aes...) - if S390X.HasSTFLE { - facilities := stfle() - if facilities.Has(msa4) { - kmctr := kmctrQuery() - S390X.HasAESCTR = kmctr.Has(aes...) - } - if facilities.Has(msa8) { - kma := kmaQuery() - S390X.HasAESGCM = kma.Has(aes...) - } - } - - // compute message digest - kimd := kimdQuery() // intermediate (no padding) - klmd := klmdQuery() // last (padding) - S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) - S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) - S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) - S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist - sha3 := []function{ - sha3_224, sha3_256, sha3_384, sha3_512, - shake128, shake256, - } - S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s deleted file mode 100644 index 96f81e20..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_s390x.s +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc -// +build gc - -#include "textflag.h" - -// func stfle() facilityList -TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 - MOVD $ret+0(FP), R1 - MOVD $3, R0 // last doubleword index to store - XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) - WORD $0xb2b01000 // store facility list extended (STFLE) - RET - -// func kmQuery() queryResult -TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KM-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92E0024 // cipher message (KM) - RET - -// func kmcQuery() queryResult -TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMC-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92F0024 // cipher message with chaining (KMC) - RET - -// func kmctrQuery() queryResult -TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMCTR-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92D4024 // cipher message with counter (KMCTR) - RET - -// func kmaQuery() queryResult -TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMA-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xb9296024 // cipher message with authentication (KMA) - RET - -// func kimdQuery() queryResult -TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KIMD-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB93E0024 // compute intermediate message digest (KIMD) - RET - -// func klmdQuery() queryResult -TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KLMD-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB93F0024 // compute last message digest 
(KLMD) - RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go deleted file mode 100644 index 7747d888..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_wasm.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build wasm -// +build wasm - -package cpu - -// We're compiling the cpu package for an unknown (software-abstracted) CPU. -// Make CacheLinePad an empty struct and hope that the usual struct alignment -// rules are good enough. - -const cacheLineSize = 0 - -func initOptions() {} - -func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go deleted file mode 100644 index f5aacfc8..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build 386 || amd64 || amd64p32 -// +build 386 amd64 amd64p32 - -package cpu - -import "runtime" - -const cacheLineSize = 64 - -func initOptions() { - options = []option{ - {Name: "adx", Feature: &X86.HasADX}, - {Name: "aes", Feature: &X86.HasAES}, - {Name: "avx", Feature: &X86.HasAVX}, - {Name: "avx2", Feature: &X86.HasAVX2}, - {Name: "avx512", Feature: &X86.HasAVX512}, - {Name: "avx512f", Feature: &X86.HasAVX512F}, - {Name: "avx512cd", Feature: &X86.HasAVX512CD}, - {Name: "avx512er", Feature: &X86.HasAVX512ER}, - {Name: "avx512pf", Feature: &X86.HasAVX512PF}, - {Name: "avx512vl", Feature: &X86.HasAVX512VL}, - {Name: "avx512bw", Feature: &X86.HasAVX512BW}, - {Name: "avx512dq", Feature: &X86.HasAVX512DQ}, - {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA}, - {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI}, - {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW}, - {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS}, - {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ}, - {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ}, - {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI}, - {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI}, - {Name: "avx512vaes", Feature: &X86.HasAVX512VAES}, - {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, - {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, - {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, - {Name: "bmi1", Feature: &X86.HasBMI1}, - {Name: "bmi2", Feature: &X86.HasBMI2}, - {Name: "cx16", Feature: &X86.HasCX16}, - {Name: "erms", Feature: &X86.HasERMS}, - {Name: "fma", Feature: &X86.HasFMA}, - {Name: "osxsave", Feature: &X86.HasOSXSAVE}, - {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, - {Name: "popcnt", Feature: &X86.HasPOPCNT}, - {Name: "rdrand", Feature: &X86.HasRDRAND}, - {Name: "rdseed", Feature: &X86.HasRDSEED}, - {Name: "sse3", Feature: &X86.HasSSE3}, - {Name: "sse41", Feature: &X86.HasSSE41}, - {Name: "sse42", Feature: &X86.HasSSE42}, - {Name: "ssse3", Feature: &X86.HasSSSE3}, - - // These capabilities should always be enabled on amd64: - {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, - } -} - -func archInit() { - - Initialized = true - - maxID, _, _, _ := cpuid(0, 0) - - if maxID < 1 { - return - } - - _, _, ecx1, edx1 := cpuid(1, 0) - X86.HasSSE2 = isSet(26, edx1) - - X86.HasSSE3 = isSet(0, ecx1) - X86.HasPCLMULQDQ = isSet(1, ecx1) - X86.HasSSSE3 = isSet(9, ecx1) - X86.HasFMA = isSet(12, ecx1) - 
X86.HasCX16 = isSet(13, ecx1)
-	X86.HasSSE41 = isSet(19, ecx1)
-	X86.HasSSE42 = isSet(20, ecx1)
-	X86.HasPOPCNT = isSet(23, ecx1)
-	X86.HasAES = isSet(25, ecx1)
-	X86.HasOSXSAVE = isSet(27, ecx1)
-	X86.HasRDRAND = isSet(30, ecx1)
-
-	var osSupportsAVX, osSupportsAVX512 bool
-	// For XGETBV, OSXSAVE bit is required and sufficient.
-	if X86.HasOSXSAVE {
-		eax, _ := xgetbv()
-		// Check if XMM and YMM registers have OS support.
-		osSupportsAVX = isSet(1, eax) && isSet(2, eax)
-
-		if runtime.GOOS == "darwin" {
-			// Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers.
-			// Since users can't rely on mask register contents, let's not advertise AVX-512 support.
-			// See issue 49233.
-			osSupportsAVX512 = false
-		} else {
-			// Check if OPMASK and ZMM registers have OS support.
-			osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
-		}
-	}
-
-	X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
-
-	if maxID < 7 {
-		return
-	}
-
-	_, ebx7, ecx7, edx7 := cpuid(7, 0)
-	X86.HasBMI1 = isSet(3, ebx7)
-	X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
-	X86.HasBMI2 = isSet(8, ebx7)
-	X86.HasERMS = isSet(9, ebx7)
-	X86.HasRDSEED = isSet(18, ebx7)
-	X86.HasADX = isSet(19, ebx7)
-
-	X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
-	if X86.HasAVX512 {
-		X86.HasAVX512F = true
-		X86.HasAVX512CD = isSet(28, ebx7)
-		X86.HasAVX512ER = isSet(27, ebx7)
-		X86.HasAVX512PF = isSet(26, ebx7)
-		X86.HasAVX512VL = isSet(31, ebx7)
-		X86.HasAVX512BW = isSet(30, ebx7)
-		X86.HasAVX512DQ = isSet(17, ebx7)
-		X86.HasAVX512IFMA = isSet(21, ebx7)
-		X86.HasAVX512VBMI = isSet(1, ecx7)
-		X86.HasAVX5124VNNIW = isSet(2, edx7)
-		X86.HasAVX5124FMAPS = isSet(3, edx7)
-		X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7)
-		X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7)
-		X86.HasAVX512VNNI = isSet(11, ecx7)
-		X86.HasAVX512GFNI = isSet(8, ecx7)
-		X86.HasAVX512VAES = isSet(9, ecx7)
-		X86.HasAVX512VBMI2 = isSet(6, ecx7)
-		X86.HasAVX512BITALG = isSet(12, ecx7)
-
-		eax71, _, _, _ := cpuid(7, 1)
-		X86.HasAVX512BF16 = isSet(5, eax71)
-	}
-}
-
-func isSet(bitpos uint, value uint32) bool {
-	return value&(1<<bitpos) != 0
-}
diff --git a/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/vendor/golang.org/x/sys/cpu/hwcap_linux.go
deleted file mode 100644
--- a/vendor/golang.org/x/sys/cpu/hwcap_linux.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-import (
-	"io/ioutil"
-)
-
-const (
-	_AT_HWCAP  = 16
-	_AT_HWCAP2 = 26
-
-	procAuxv = "/proc/self/auxv"
-
-	uintSize = int(32 << (^uint(0) >> 63))
-)
-
-// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2
-// These are initialized in cpu_$GOARCH.go
-// and should not be changed after they are initialized.
-var hwCap uint
-var hwCap2 uint
-
-func readHWCAP() error {
-	buf, err := ioutil.ReadFile(procAuxv)
-	if err != nil {
-		// e.g. on android /proc/self/auxv is not accessible, so silently
-		// ignore the error and leave Initialized = false. On some
-		// architectures (e.g. arm64) doinit() implements a fallback
-		// readout and will set Initialized = true again.
-		return err
-	}
-	bo := hostByteOrder()
-	for len(buf) >= 2*(uintSize/8) {
-		var tag, val uint
-		switch uintSize {
-		case 32:
-			tag = uint(bo.Uint32(buf[0:]))
-			val = uint(bo.Uint32(buf[4:]))
-			buf = buf[8:]
-		case 64:
-			tag = uint(bo.Uint64(buf[0:]))
-			val = uint(bo.Uint64(buf[8:]))
-			buf = buf[16:]
-		}
-		switch tag {
-		case _AT_HWCAP:
-			hwCap = val
-		case _AT_HWCAP2:
-			hwCap2 = val
-		}
-	}
-	return nil
-}
diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go
deleted file mode 100644
index 762b63d6..00000000
--- a/vendor/golang.org/x/sys/cpu/parse.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
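On the consumer side, all of the CPUID and XGETBV plumbing above surfaces as plain booleans on the exported X86 struct; a typical caller gates an optimized path like this (sketch):

    package main

    import (
        "fmt"

        "golang.org/x/sys/cpu"
    )

    func main() {
        // Populated by archInit via CPUID/XGETBV on 386/amd64.
        if cpu.X86.HasAVX2 && cpu.X86.HasFMA {
            fmt.Println("using AVX2+FMA kernels")
            return
        }
        fmt.Println("using the portable fallback")
    }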
- -package cpu - -import "strconv" - -// parseRelease parses a dot-separated version number. It follows the semver -// syntax, but allows the minor and patch versions to be elided. -// -// This is a copy of the Go runtime's parseRelease from -// https://golang.org/cl/209597. -func parseRelease(rel string) (major, minor, patch int, ok bool) { - // Strip anything after a dash or plus. - for i := 0; i < len(rel); i++ { - if rel[i] == '-' || rel[i] == '+' { - rel = rel[:i] - break - } - } - - next := func() (int, bool) { - for i := 0; i < len(rel); i++ { - if rel[i] == '.' { - ver, err := strconv.Atoi(rel[:i]) - rel = rel[i+1:] - return ver, err == nil - } - } - ver, err := strconv.Atoi(rel) - rel = "" - return ver, err == nil - } - if major, ok = next(); !ok || rel == "" { - return - } - if minor, ok = next(); !ok || rel == "" { - return - } - patch, ok = next() - return -} diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go deleted file mode 100644 index d87bd6b3..00000000 --- a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && arm64 -// +build linux,arm64 - -package cpu - -import ( - "errors" - "io" - "os" - "strings" -) - -func readLinuxProcCPUInfo() error { - f, err := os.Open("/proc/cpuinfo") - if err != nil { - return err - } - defer f.Close() - - var buf [1 << 10]byte // enough for first CPU - n, err := io.ReadFull(f, buf[:]) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - in := string(buf[:n]) - const features = "\nFeatures : " - i := strings.Index(in, features) - if i == -1 { - return errors.New("no CPU features found") - } - in = in[i+len(features):] - if i := strings.Index(in, "\n"); i != -1 { - in = in[:i] - } - m := map[string]*bool{} - - initOptions() // need it early here; it's harmless to call twice - for _, o := range options { - m[o.Name] = o.Feature - } - // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm". - m["evtstrm"] = &ARM64.HasEVTSTRM - - for _, f := range strings.Fields(in) { - if p, ok := m[f]; ok { - *p = true - } - } - return nil -} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go deleted file mode 100644 index 96134157..00000000 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Recreate a getsystemcfg syscall handler instead of -// using the one provided by x/sys/unix to avoid having -// the dependency between them. (See golang.org/issue/32102) -// Moreover, this file will be used during the building of -// gccgo's libgo and thus must not used a CGo method. 
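The kernel-version gate built from parseRelease and linuxKernelCanEmulateCPUID is worth seeing end to end. A loose, self-contained reimplementation; parseKernelRelease is a hypothetical stand-in, not this package's function:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // parseKernelRelease extracts major.minor from a uname release string
    // such as "4.4.180-ts28", ignoring any -suffix or +suffix.
    func parseKernelRelease(rel string) (major, minor int, ok bool) {
        if i := strings.IndexAny(rel, "-+"); i != -1 {
            rel = rel[:i]
        }
        parts := strings.SplitN(rel, ".", 3)
        if len(parts) < 2 {
            return 0, 0, false
        }
        var err error
        if major, err = strconv.Atoi(parts[0]); err != nil {
            return 0, 0, false
        }
        if minor, err = strconv.Atoi(parts[1]); err != nil {
            return 0, 0, false
        }
        return major, minor, true
    }

    func main() {
        major, minor, ok := parseKernelRelease("4.4.180-ts28")
        canEmulateCPUID := ok && (major > 4 || major == 4 && minor >= 11)
        fmt.Println(major, minor, canEmulateCPUID) // 4 4 false: use /proc/cpuinfo instead
    }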
- -//go:build aix && gccgo -// +build aix,gccgo - -package cpu - -import ( - "syscall" -) - -//extern getsystemcfg -func gccgoGetsystemcfg(label uint32) (r uint64) - -func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { - r1 = uintptr(gccgoGetsystemcfg(uint32(label))) - e1 = syscall.GetErrno() - return -} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go deleted file mode 100644 index 904be42f..00000000 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Minimal copy of x/sys/unix so the cpu package can make a -// system call on AIX without depending on x/sys/unix. -// (See golang.org/issue/32102) - -//go:build aix && ppc64 && gc -// +build aix,ppc64,gc - -package cpu - -import ( - "syscall" - "unsafe" -) - -//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" - -//go:linkname libc_getsystemcfg libc_getsystemcfg - -type syscallFunc uintptr - -var libc_getsystemcfg syscallFunc - -type errno = syscall.Errno - -// Implemented in runtime/syscall_aix.go. -func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) -func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) - -func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) - return -} diff --git a/vendor/golang.org/x/text/runes/cond.go b/vendor/golang.org/x/text/runes/cond.go deleted file mode 100644 index df7aa02d..00000000 --- a/vendor/golang.org/x/text/runes/cond.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package runes - -import ( - "unicode/utf8" - - "golang.org/x/text/transform" -) - -// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is. -// This is done for various reasons: -// - To retain the semantics of the Nop transformer: if input is passed to a Nop -// one would expect it to be unchanged. -// - It would be very expensive to pass a converted RuneError to a transformer: -// a transformer might need more source bytes after RuneError, meaning that -// the only way to pass it safely is to create a new buffer and manage the -// intermingling of RuneErrors and normal input. -// - Many transformers leave ill-formed UTF-8 as is, so this is not -// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a -// logical consequence of the operation (as for Map) or if it otherwise would -// pose security concerns (as for Remove). -// - An alternative would be to return an error on ill-formed UTF-8, but this -// would be inconsistent with other operations. - -// If returns a transformer that applies tIn to consecutive runes for which -// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset -// is called on tIn and tNotIn at the start of each run. A Nop transformer will -// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated -// to RuneError to determine which transformer to apply, but is passed as is to -// the respective transformer. 
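Before the implementation, a usage sketch of how these combinators compose in practice; Remove and Map are defined later in this package, and If is what stitches conditional pipelines together:

    package main

    import (
        "fmt"
        "unicode"

        "golang.org/x/text/runes"
        "golang.org/x/text/transform"
    )

    func main() {
        t := transform.Chain(
            runes.Remove(runes.In(unicode.White_Space)), // drop all whitespace
            runes.Map(unicode.ToUpper),                  // uppercase what remains
        )
        out, _, err := transform.String(t, "hello, wörld")
        if err != nil {
            fmt.Println("transform failed:", err)
            return
        }
        fmt.Println(out) // HELLO,WÖRLD
    }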
-func If(s Set, tIn, tNotIn transform.Transformer) Transformer { - if tIn == nil && tNotIn == nil { - return Transformer{transform.Nop} - } - if tIn == nil { - tIn = transform.Nop - } - if tNotIn == nil { - tNotIn = transform.Nop - } - sIn, ok := tIn.(transform.SpanningTransformer) - if !ok { - sIn = dummySpan{tIn} - } - sNotIn, ok := tNotIn.(transform.SpanningTransformer) - if !ok { - sNotIn = dummySpan{tNotIn} - } - - a := &cond{ - tIn: sIn, - tNotIn: sNotIn, - f: s.Contains, - } - a.Reset() - return Transformer{a} -} - -type dummySpan struct{ transform.Transformer } - -func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) { - return 0, transform.ErrEndOfSpan -} - -type cond struct { - tIn, tNotIn transform.SpanningTransformer - f func(rune) bool - check func(rune) bool // current check to perform - t transform.SpanningTransformer // current transformer to use -} - -// Reset implements transform.Transformer. -func (t *cond) Reset() { - t.check = t.is - t.t = t.tIn - t.t.Reset() // notIn will be reset on first usage. -} - -func (t *cond) is(r rune) bool { - if t.f(r) { - return true - } - t.check = t.isNot - t.t = t.tNotIn - t.tNotIn.Reset() - return false -} - -func (t *cond) isNot(r rune) bool { - if !t.f(r) { - return true - } - t.check = t.is - t.t = t.tIn - t.tIn.Reset() - return false -} - -// This implementation of Span doesn't help all too much, but it needs to be -// there to satisfy this package's Transformer interface. -// TODO: there are certainly room for improvements, though. For example, if -// t.t == transform.Nop (which will a common occurrence) it will save a bundle -// to special-case that loop. -func (t *cond) Span(src []byte, atEOF bool) (n int, err error) { - p := 0 - for n < len(src) && err == nil { - // Don't process too much at a time as the Spanner that will be - // called on this block may terminate early. - const maxChunk = 4096 - max := len(src) - if v := n + maxChunk; v < max { - max = v - } - atEnd := false - size := 0 - current := t.t - for ; p < max; p += size { - r := rune(src[p]) - if r < utf8.RuneSelf { - size = 1 - } else if r, size = utf8.DecodeRune(src[p:]); size == 1 { - if !atEOF && !utf8.FullRune(src[p:]) { - err = transform.ErrShortSrc - break - } - } - if !t.check(r) { - // The next rune will be the start of a new run. - atEnd = true - break - } - } - n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src))) - n += n2 - if err2 != nil { - return n, err2 - } - // At this point either err != nil or t.check will pass for the rune at p. - p = n + size - } - return n, err -} - -func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - p := 0 - for nSrc < len(src) && err == nil { - // Don't process too much at a time, as the work might be wasted if the - // destination buffer isn't large enough to hold the result or a - // transform returns an error early. - const maxChunk = 4096 - max := len(src) - if n := nSrc + maxChunk; n < len(src) { - max = n - } - atEnd := false - size := 0 - current := t.t - for ; p < max; p += size { - r := rune(src[p]) - if r < utf8.RuneSelf { - size = 1 - } else if r, size = utf8.DecodeRune(src[p:]); size == 1 { - if !atEOF && !utf8.FullRune(src[p:]) { - err = transform.ErrShortSrc - break - } - } - if !t.check(r) { - // The next rune will be the start of a new run. 
- atEnd = true - break - } - } - nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src))) - nDst += nDst2 - nSrc += nSrc2 - if err2 != nil { - return nDst, nSrc, err2 - } - // At this point either err != nil or t.check will pass for the rune at p. - p = nSrc + size - } - return nDst, nSrc, err -} diff --git a/vendor/golang.org/x/text/runes/runes.go b/vendor/golang.org/x/text/runes/runes.go deleted file mode 100644 index 930e87fe..00000000 --- a/vendor/golang.org/x/text/runes/runes.go +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package runes provide transforms for UTF-8 encoded text. -package runes // import "golang.org/x/text/runes" - -import ( - "unicode" - "unicode/utf8" - - "golang.org/x/text/transform" -) - -// A Set is a collection of runes. -type Set interface { - // Contains returns true if r is contained in the set. - Contains(r rune) bool -} - -type setFunc func(rune) bool - -func (s setFunc) Contains(r rune) bool { - return s(r) -} - -// Note: using funcs here instead of wrapping types result in cleaner -// documentation and a smaller API. - -// In creates a Set with a Contains method that returns true for all runes in -// the given RangeTable. -func In(rt *unicode.RangeTable) Set { - return setFunc(func(r rune) bool { return unicode.Is(rt, r) }) -} - -// NotIn creates a Set with a Contains method that returns true for all runes not -// in the given RangeTable. -func NotIn(rt *unicode.RangeTable) Set { - return setFunc(func(r rune) bool { return !unicode.Is(rt, r) }) -} - -// Predicate creates a Set with a Contains method that returns f(r). -func Predicate(f func(rune) bool) Set { - return setFunc(f) -} - -// Transformer implements the transform.Transformer interface. -type Transformer struct { - t transform.SpanningTransformer -} - -func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - return t.t.Transform(dst, src, atEOF) -} - -func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) { - return t.t.Span(b, atEOF) -} - -func (t Transformer) Reset() { t.t.Reset() } - -// Bytes returns a new byte slice with the result of converting b using t. It -// calls Reset on t. It returns nil if any error was found. This can only happen -// if an error-producing Transformer is passed to If. -func (t Transformer) Bytes(b []byte) []byte { - b, _, err := transform.Bytes(t, b) - if err != nil { - return nil - } - return b -} - -// String returns a string with the result of converting s using t. It calls -// Reset on t. It returns the empty string if any error was found. This can only -// happen if an error-producing Transformer is passed to If. -func (t Transformer) String(s string) string { - s, _, err := transform.String(t, s) - if err != nil { - return "" - } - return s -} - -// TODO: -// - Copy: copying strings and bytes in whole-rune units. -// - Validation (maybe) -// - Well-formed-ness (maybe) - -const runeErrorString = string(utf8.RuneError) - -// Remove returns a Transformer that removes runes r for which s.Contains(r). -// Illegal input bytes are replaced by RuneError before being passed to f. -func Remove(s Set) Transformer { - if f, ok := s.(setFunc); ok { - // This little trick cuts the running time of BenchmarkRemove for sets - // created by Predicate roughly in half. - // TODO: special-case RangeTables as well. 
- return Transformer{remove(f)} - } - return Transformer{remove(s.Contains)} -} - -// TODO: remove transform.RemoveFunc. - -type remove func(r rune) bool - -func (remove) Reset() {} - -// Span implements transform.Spanner. -func (t remove) Span(src []byte, atEOF bool) (n int, err error) { - for r, size := rune(0), 0; n < len(src); { - if r = rune(src[n]); r < utf8.RuneSelf { - size = 1 - } else if r, size = utf8.DecodeRune(src[n:]); size == 1 { - // Invalid rune. - if !atEOF && !utf8.FullRune(src[n:]) { - err = transform.ErrShortSrc - } else { - err = transform.ErrEndOfSpan - } - break - } - if t(r) { - err = transform.ErrEndOfSpan - break - } - n += size - } - return -} - -// Transform implements transform.Transformer. -func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for r, size := rune(0), 0; nSrc < len(src); { - if r = rune(src[nSrc]); r < utf8.RuneSelf { - size = 1 - } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { - // Invalid rune. - if !atEOF && !utf8.FullRune(src[nSrc:]) { - err = transform.ErrShortSrc - break - } - // We replace illegal bytes with RuneError. Not doing so might - // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. - // The resulting byte sequence may subsequently contain runes - // for which t(r) is true that were passed unnoticed. - if !t(utf8.RuneError) { - if nDst+3 > len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst+0] = runeErrorString[0] - dst[nDst+1] = runeErrorString[1] - dst[nDst+2] = runeErrorString[2] - nDst += 3 - } - nSrc++ - continue - } - if t(r) { - nSrc += size - continue - } - if nDst+size > len(dst) { - err = transform.ErrShortDst - break - } - for i := 0; i < size; i++ { - dst[nDst] = src[nSrc] - nDst++ - nSrc++ - } - } - return -} - -// Map returns a Transformer that maps the runes in the input using the given -// mapping. Illegal bytes in the input are converted to utf8.RuneError before -// being passed to the mapping func. -func Map(mapping func(rune) rune) Transformer { - return Transformer{mapper(mapping)} -} - -type mapper func(rune) rune - -func (mapper) Reset() {} - -// Span implements transform.Spanner. -func (t mapper) Span(src []byte, atEOF bool) (n int, err error) { - for r, size := rune(0), 0; n < len(src); n += size { - if r = rune(src[n]); r < utf8.RuneSelf { - size = 1 - } else if r, size = utf8.DecodeRune(src[n:]); size == 1 { - // Invalid rune. - if !atEOF && !utf8.FullRune(src[n:]) { - err = transform.ErrShortSrc - } else { - err = transform.ErrEndOfSpan - } - break - } - if t(r) != r { - err = transform.ErrEndOfSpan - break - } - } - return n, err -} - -// Transform implements transform.Transformer. -func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - var replacement rune - var b [utf8.UTFMax]byte - - for r, size := rune(0), 0; nSrc < len(src); { - if r = rune(src[nSrc]); r < utf8.RuneSelf { - if replacement = t(r); replacement < utf8.RuneSelf { - if nDst == len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst] = byte(replacement) - nDst++ - nSrc++ - continue - } - size = 1 - } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { - // Invalid rune. 
- if !atEOF && !utf8.FullRune(src[nSrc:]) { - err = transform.ErrShortSrc - break - } - - if replacement = t(utf8.RuneError); replacement == utf8.RuneError { - if nDst+3 > len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst+0] = runeErrorString[0] - dst[nDst+1] = runeErrorString[1] - dst[nDst+2] = runeErrorString[2] - nDst += 3 - nSrc++ - continue - } - } else if replacement = t(r); replacement == r { - if nDst+size > len(dst) { - err = transform.ErrShortDst - break - } - for i := 0; i < size; i++ { - dst[nDst] = src[nSrc] - nDst++ - nSrc++ - } - continue - } - - n := utf8.EncodeRune(b[:], replacement) - - if nDst+n > len(dst) { - err = transform.ErrShortDst - break - } - for i := 0; i < n; i++ { - dst[nDst] = b[i] - nDst++ - } - nSrc += size - } - return -} - -// ReplaceIllFormed returns a transformer that replaces all input bytes that are -// not part of a well-formed UTF-8 code sequence with utf8.RuneError. -func ReplaceIllFormed() Transformer { - return Transformer{&replaceIllFormed{}} -} - -type replaceIllFormed struct{ transform.NopResetter } - -func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) { - for n < len(src) { - // ASCII fast path. - if src[n] < utf8.RuneSelf { - n++ - continue - } - - r, size := utf8.DecodeRune(src[n:]) - - // Look for a valid non-ASCII rune. - if r != utf8.RuneError || size != 1 { - n += size - continue - } - - // Look for short source data. - if !atEOF && !utf8.FullRune(src[n:]) { - err = transform.ErrShortSrc - break - } - - // We have an invalid rune. - err = transform.ErrEndOfSpan - break - } - return n, err -} - -func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for nSrc < len(src) { - // ASCII fast path. - if r := src[nSrc]; r < utf8.RuneSelf { - if nDst == len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst] = r - nDst++ - nSrc++ - continue - } - - // Look for a valid non-ASCII rune. - if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 { - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - err = transform.ErrShortDst - break - } - nDst += size - nSrc += size - continue - } - - // Look for short source data. - if !atEOF && !utf8.FullRune(src[nSrc:]) { - err = transform.ErrShortSrc - break - } - - // We have an invalid rune. - if nDst+3 > len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst+0] = runeErrorString[0] - dst[nDst+1] = runeErrorString[1] - dst[nDst+2] = runeErrorString[2] - nDst += 3 - nSrc++ - } - return nDst, nSrc, err -} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index f34a38e4..a6b50818 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/status.proto package status @@ -48,11 +48,13 @@ type Status struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. 
+ // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 392b21fb..09d61dd1 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -279,6 +279,14 @@ type PickResult struct { // type, Done may not be called. May be nil if the balancer does not wish // to be notified when the RPC completes. Done func(DoneInfo) + + // Metadata provides a way for LB policies to inject arbitrary per-call + // metadata. Any metadata returned here will be merged with existing + // metadata added by the client application. + // + // LB policies with child policies are responsible for propagating metadata + // injected by their children to the ClientConn, as part of Pick(). + Metatada metadata.MD } // TransientFailureError returns e. It exists for backward compatibility and diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 64a232f2..66d141fc 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,14 +18,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -41,10 +40,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Enumerates the type of event // Note the terminology is different from the RPC semantics // definition, but the same meaning is expressed here. 
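The PickResult change above is the main user-visible API addition in this range: custom load-balancing pickers can now attach per-call metadata. A compile-only sketch of a picker using it; the SubConn wiring is elided, and note the field really is spelled Metatada in v1.53.0:

    package main

    import (
        "google.golang.org/grpc/balancer"
        "google.golang.org/grpc/metadata"
    )

    type headerPicker struct {
        sc balancer.SubConn // chosen elsewhere by the LB policy
    }

    // Pick satisfies balancer.Picker and injects one metadata pair per RPC;
    // the pairs are merged with whatever the application already set.
    func (p *headerPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
        return balancer.PickResult{
            SubConn:  p.sc,
            Metatada: metadata.Pairs("x-picked-by", "headerPicker"), // field name as shipped in v1.53.0
        }, nil
    }

    var _ balancer.Picker = (*headerPicker)(nil)

    func main() {}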
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 422639c7..d607d4e9 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -256,7 +256,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if err != nil { return nil, err } - cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts) + cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts) if err != nil { return nil, err } @@ -788,10 +788,16 @@ func (cc *ClientConn) incrCallsFailed() { func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } ac.mu.Unlock() return nil } @@ -928,7 +934,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, @@ -1231,9 +1237,11 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne addr.ServerName = ac.cc.getServerName(addr) hctx, hcancel := context.WithCancel(ac.ctx) - onClose := grpcsync.OnceFunc(func() { + onClose := func(r transport.GoAwayReason) { ac.mu.Lock() defer ac.mu.Unlock() + // adjust params based on GoAwayReason + ac.adjustParams(r) if ac.state == connectivity.Shutdown { // Already shut down. tearDown() already cleared the transport and // canceled hctx via ac.ctx, and we expected this connection to be @@ -1254,20 +1262,17 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // Always go idle and wait for the LB policy to initiate a new // connection attempt. ac.updateConnectivityState(connectivity.Idle, nil) - }) - onGoAway := func(r transport.GoAwayReason) { - ac.mu.Lock() - ac.adjustParams(r) - ac.mu.Unlock() - onClose() } connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } // newTr is either nil, or closed. hcancel() channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. 
Err: %v", addr, err) @@ -1371,7 +1376,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) } } }() @@ -1582,30 +1587,17 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { } // parseTarget uses RFC 3986 semantics to parse the given target into a -// resolver.Target struct containing scheme, authority and endpoint. Query +// resolver.Target struct containing scheme, authority and url. Query // params are stripped from the endpoint. func parseTarget(target string) (resolver.Target, error) { u, err := url.Parse(target) if err != nil { return resolver.Target{}, err } - // For targets of the form "[scheme]://[authority]/endpoint, the endpoint - // value returned from url.Parse() contains a leading "/". Although this is - // in accordance with RFC 3986, we do not want to break existing resolver - // implementations which expect the endpoint without the leading "/". So, we - // end up stripping the leading "/" here. But this will result in an - // incorrect parsing for something like "unix:///path/to/socket". Since we - // own the "unix" resolver, we can workaround in the unix resolver by using - // the `URL` field instead of the `Endpoint` field. - endpoint := u.Path - if endpoint == "" { - endpoint = u.Opaque - } - endpoint = strings.TrimPrefix(endpoint, "/") + return resolver.Target{ Scheme: u.Scheme, Authority: u.Host, - Endpoint: endpoint, URL: *u, }, nil } diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index ce2bbc10..877b7cd2 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -23,9 +23,9 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "net" "net/url" + "os" credinternal "google.golang.org/grpc/internal/credentials" ) @@ -166,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // it will override the virtual host name of authority (e.g. :authority header // field) in requests. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { - b, err := ioutil.ReadFile(certFile) + b, err := os.ReadFile(certFile) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 9372dc32..4866da10 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -44,6 +44,7 @@ func init() { extraDialOptions = nil } internal.WithBinaryLogger = withBinaryLogger + internal.JoinDialOptions = newJoinDialOption } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -111,13 +112,28 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { } } +type joinDialOption struct { + opts []DialOption +} + +func (jdo *joinDialOption) apply(do *dialOptions) { + for _, opt := range jdo.opts { + opt.apply(do) + } +} + +func newJoinDialOption(opts ...DialOption) DialOption { + return &joinDialOption{opts: opts} +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. 
The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is // 32KB. // -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. Note: A Send call may not directly +// translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s @@ -127,8 +143,9 @@ func WithWriteBufferSize(s int) DialOption { // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // -// The default value for this buffer is 32KB. Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. +// The default value for this buffer is 32KB. Zero or negative values will +// disable read buffer for a connection so data framer can access the +// underlying conn directly. func WithReadBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.ReadBufferSize = s diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 711763d5..07a58613 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -75,7 +75,9 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c - grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + if !grpcutil.IsCompressorNameRegistered(c.Name()) { + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + } } // GetCompressor returns Compressor for the given compressor name. diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index b5560b47..5de66e40 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -22,7 +22,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "log" "os" "strconv" @@ -140,9 +139,9 @@ func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) // newLoggerV2 creates a loggerV2 to be used as default logger. // All logs are written to stderr. func newLoggerV2() LoggerV2 { - errorW := ioutil.Discard - warningW := ioutil.Discard - infoW := ioutil.Discard + errorW := io.Discard + warningW := io.Discard + infoW := io.Discard logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") switch logLevel { diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index a66024d2..8e29a62f 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
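Editor's note: the joinDialOption type added to dialoptions.go above lets a slice of DialOptions be applied as a single option, exposed to sibling packages through the new internal.JoinDialOptions hook. A standalone sketch of the same composition pattern, using illustrative names rather than the library's unexported types:

package main

import "fmt"

type dialOptions struct{ userAgent string }

// DialOption mirrors the one-method shape used in dialoptions.go.
type DialOption interface{ apply(*dialOptions) }

type funcDialOption func(*dialOptions)

func (f funcDialOption) apply(do *dialOptions) { f(do) }

// joinDialOption applies each wrapped option in order, like the type
// added in this patch.
type joinDialOption struct{ opts []DialOption }

func (j joinDialOption) apply(do *dialOptions) {
	for _, o := range j.opts {
		o.apply(do)
	}
}

func main() {
	withUA := func(ua string) DialOption {
		return funcDialOption(func(do *dialOptions) { do.userAgent = ua })
	}
	joined := joinDialOption{opts: []DialOption{withUA("a"), withUA("b")}}
	var do dialOptions
	joined.apply(&do)
	fmt.Println(do.userAgent) // "b": later options win, same as flat application
}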
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/health/v1/health.proto package grpc_health_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type HealthCheckResponse_ServingStatus int32 const ( diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 179f4a26..d71e4417 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -26,7 +26,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -79,7 +79,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { // Build is an internal only method for building the proto message out of the // input event. It's made public to enable other library to reuse as much logic // in TruncatingMethodLogger as possible. -func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -87,11 +87,11 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m.SequenceIdWithinCall = ml.idWithinCallGen.next() switch pay := m.Payload.(type) { - case *pb.GrpcLogEntry_ClientHeader: + case *binlogpb.GrpcLogEntry_ClientHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) - case *pb.GrpcLogEntry_ServerHeader: + case *binlogpb.GrpcLogEntry_ServerHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) - case *pb.GrpcLogEntry_Message: + case *binlogpb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } return m @@ -102,7 +102,7 @@ func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } -func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -121,7 +121,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated // but not counted towards the size limit. 
continue } - currentEntryLen := uint64(len(entry.Value)) + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) if currentEntryLen > bytesLimit { break } @@ -132,7 +132,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated return truncated } -func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } @@ -145,7 +145,7 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated // LogEntryConfig represents the configuration for binary log entry. type LogEntryConfig interface { - toProto() *pb.GrpcLogEntry + toProto() *binlogpb.GrpcLogEntry } // ClientHeader configs the binary log entry to be a ClientHeader entry. @@ -159,10 +159,10 @@ type ClientHeader struct { PeerAddr net.Addr } -func (c *ClientHeader) toProto() *pb.GrpcLogEntry { +func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { // This function doesn't need to set all the fields (e.g. seq ID). The Log // function will set the fields when necessary. - clientHeader := &pb.ClientHeader{ + clientHeader := &binlogpb.ClientHeader{ Metadata: mdToMetadataProto(c.Header), MethodName: c.MethodName, Authority: c.Authority, @@ -170,16 +170,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry { if c.Timeout > 0 { clientHeader.Timeout = ptypes.DurationProto(c.Timeout) } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Payload: &pb.GrpcLogEntry_ClientHeader{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ ClientHeader: clientHeader, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -195,19 +195,19 @@ type ServerHeader struct { PeerAddr net.Addr } -func (c *ServerHeader) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ +func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ Metadata: mdToMetadataProto(c.Header), }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -223,7 +223,7 @@ type ClientMessage struct { Message interface{} } -func (c *ClientMessage) toProto() *pb.GrpcLogEntry { +func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -238,19 +238,19 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: 
&binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -263,7 +263,7 @@ type ServerMessage struct { Message interface{} } -func (c *ServerMessage) toProto() *pb.GrpcLogEntry { +func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -278,19 +278,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -300,15 +300,15 @@ type ClientHalfClose struct { OnClientSide bool } -func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, +func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Payload: nil, // No payload here. } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -324,7 +324,7 @@ type ServerTrailer struct { PeerAddr net.Addr } -func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { +func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { grpclogLogger.Info("binarylogging: error in trailer is not a status error") @@ -340,10 +340,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ Metadata: mdToMetadataProto(c.Trailer), StatusCode: uint32(st.Code()), StatusMessage: st.Message(), @@ -352,9 +352,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -367,15 +367,15 @@ type Cancel struct { OnClientSide bool } -func (c *Cancel) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, +func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, Payload: nil, } if c.OnClientSide { - ret.Logger = 
pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -392,15 +392,15 @@ func metadataKeyOmit(key string) bool { return strings.HasPrefix(key, "grpc-") } -func mdToMetadataProto(md metadata.MD) *pb.Metadata { - ret := &pb.Metadata{} +func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { + ret := &binlogpb.Metadata{} for k, vv := range md { if metadataKeyOmit(k) { continue } for _, v := range vv { ret.Entry = append(ret.Entry, - &pb.MetadataEntry{ + &binlogpb.MetadataEntry{ Key: k, Value: []byte(v), }, @@ -410,26 +410,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata { return ret } -func addrToProto(addr net.Addr) *pb.Address { - ret := &pb.Address{} +func addrToProto(addr net.Addr) *binlogpb.Address { + ret := &binlogpb.Address{} switch a := addr.(type) { case *net.TCPAddr: if a.IP.To4() != nil { - ret.Type = pb.Address_TYPE_IPV4 + ret.Type = binlogpb.Address_TYPE_IPV4 } else if a.IP.To16() != nil { - ret.Type = pb.Address_TYPE_IPV6 + ret.Type = binlogpb.Address_TYPE_IPV6 } else { - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN // Do not set address and port fields. break } ret.Address = a.IP.String() ret.IpPort = uint32(a.Port) case *net.UnixAddr: - ret.Type = pb.Address_TYPE_UNIX + ret.Type = binlogpb.Address_TYPE_UNIX ret.Address = a.String() default: - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN } return ret } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index c2fdd58b..264de387 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -26,7 +26,7 @@ import ( "time" "github.com/golang/protobuf/proto" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" ) var ( @@ -42,15 +42,15 @@ type Sink interface { // Write will be called to write the log entry into the sink. // // It should be thread-safe so it can be called in parallel. - Write(*pb.GrpcLogEntry) error + Write(*binlogpb.GrpcLogEntry) error // Close will be called when the Sink is replaced by a new Sink. Close() error } type noopSink struct{} -func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } -func (ns *noopSink) Close() error { return nil } +func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } // newWriterSink creates a binary log sink with the given writer. 
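Editor's note: most of the method_logger.go hunk above is a mechanical import rename (pb to binlogpb), but it also carries a behavioral fix: truncateMetadata now charges each entry's key bytes as well as its value bytes against the header byte limit. A simplified standalone sketch of the corrected accounting, with stand-in types rather than the binarylog protos:

package main

import "fmt"

type entry struct {
	key   string
	value []byte
}

// keepWithinLimit keeps entries while they fit in bytesLimit, counting
// key plus value length; previously only the value length was counted.
func keepWithinLimit(entries []entry, bytesLimit uint64) (kept int, truncated bool) {
	for _, e := range entries {
		entryLen := uint64(len(e.key)) + uint64(len(e.value))
		if entryLen > bytesLimit {
			return kept, true
		}
		bytesLimit -= entryLen
		kept++
	}
	return kept, false
}

func main() {
	es := []entry{{"k1", []byte("vvvv")}, {"k2", []byte("vvvv")}}
	fmt.Println(keepWithinLimit(es, 8)) // 1 true: the first 6 bytes fit, the next 6 do not
}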
// @@ -66,7 +66,7 @@ type writerSink struct { out io.Writer } -func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { +func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) @@ -96,7 +96,7 @@ type bufferedSink struct { done chan struct{} } -func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { +func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { fs.mu.Lock() defer fs.mu.Unlock() if !fs.flusherStarted { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 7edd196b..5ba9d94d 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -21,19 +21,42 @@ package envconfig import ( "os" + "strconv" "strings" ) -const ( - prefix = "GRPC_GO_" - txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" - advertiseCompressorsStr = prefix + "ADVERTISE_COMPRESSORS" -) - var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). - TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") + TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) // AdvertiseCompressors is set if registered compressor should be advertised // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). - AdvertiseCompressors = !strings.EqualFold(os.Getenv(advertiseCompressorsStr), "false") + AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 + // entries but may be overridden by setting the environment variable + // "GRPC_RING_HASH_CAP". This does not override the default bounds + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). + RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) ) + +func boolFromEnv(envVar string, def bool) bool { + if def { + // The default is true; return true unless the variable is "false". + return !strings.EqualFold(os.Getenv(envVar), "false") + } + // The default is false; return false unless the variable is "true". + return strings.EqualFold(os.Getenv(envVar), "true") +} + +func uint64FromEnv(envVar string, def, min, max uint64) uint64 { + v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) + if err != nil { + return def + } + if v < min { + return min + } + if v > max { + return max + } + return v +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index af09711a..04136882 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -20,7 +20,6 @@ package envconfig import ( "os" - "strings" ) const ( @@ -36,16 +35,6 @@ const ( // // When both bootstrap FileName and FileContent are set, FileName is used. 
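Editor's note: the new boolFromEnv and uint64FromEnv helpers above centralize env-var parsing that was previously inlined per flag. Their semantics, sketched as calls one could make from inside the envconfig package (both helpers are unexported; os is already imported there):

os.Setenv("GRPC_GO_IGNORE_TXT_ERRORS", "FALSE")
_ = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) // false: default-true flags flip only on "false", case-insensitively

os.Setenv("GRPC_RING_HASH_CAP", "16000000")
_ = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) // 8388608: out-of-range values are clamped to [min, max]

os.Setenv("GRPC_RING_HASH_CAP", "not-a-number")
_ = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) // 4096: unset or unparsable values fall back to the default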
XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" - - ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" - clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" - aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" - outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" - federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" - rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" - - c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" ) var ( @@ -64,38 +53,40 @@ var ( // XDSRingHash indicates whether ring hash support is enabled, which can be // disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") + XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) // XDSClientSideSecurity is used to control processing of security // configuration on the client-side. // // Note that there is no env var protection for the server-side because we // have a brand new API on the server-side and users explicitly need to use // the new API to get security integration on the server. - XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") + XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) // XDSAggregateAndDNS indicates whether processing of aggregated cluster // and DNS cluster is enabled, which can be enabled by setting the // environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to // "true". - XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false") + XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, // which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". - XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") + XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) // XDSOutlierDetection indicates whether outlier detection support is // enabled, which can be disabled by setting the environment variable // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". - XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false") - // XDSFederation indicates whether federation support is enabled. - XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") + XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) + // XDSFederation indicates whether federation support is enabled, which can + // be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". + XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false) // XDSRLS indicates whether processing of Cluster Specifier plugins and // support for the RLS CLuster Specifier is enabled, which can be enabled by // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to // "true". - XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true") + XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. 
- C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") ) diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index fd0ee3dc..0a76d9de 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -77,6 +77,9 @@ var ( // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. ClearGlobalDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. + JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption // JoinServerOptions combines the server options passed as arguments into a // single server option. JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 75301c51..09a667f3 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -116,7 +116,7 @@ type dnsBuilder struct{} // Build creates and starts a DNS resolver that watches the name resolution of the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - host, port, err := parseTarget(target.Endpoint, defaultPort) + host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { return nil, err } @@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.Authority == "" { + if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.Authority) + d.resolver, err = customAuthorityResolver(target.URL.Host) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index 520d9229..afac5657 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -20,13 +20,20 @@ // name without scheme back to gRPC as resolved address. 
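Editor's note: the resolver builders touched by this patch (dns above, passthrough and unix just below) all follow the same 1.53 migration: resolver.Target's Endpoint struct field is replaced by an Endpoint() method derived from Target.URL, and the deprecated Authority field is read as Target.URL.Host. A sketch of a builder written against the new surface; exampleBuilder and newExampleResolver are hypothetical names:

func (b *exampleBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	authority := target.URL.Host  // was: target.Authority
	endpoint := target.Endpoint() // was: target.Endpoint (a struct field)
	if authority != "" {
		return nil, fmt.Errorf("example: unexpected authority %q", authority)
	}
	return newExampleResolver(endpoint, cc), nil
}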
package passthrough -import "google.golang.org/grpc/resolver" +import ( + "errors" + + "google.golang.org/grpc/resolver" +) const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint() == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } r := &passthroughResolver{ target: target, cc: cc, @@ -45,7 +52,7 @@ type passthroughResolver struct { } func (r *passthroughResolver) start() { - r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 7f1a702c..16091168 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -34,8 +34,8 @@ type builder struct { } func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - if target.Authority != "" { - return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) } // gRPC was parsing the dial target manually before PR #4817, and we diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 409769f4..9097385e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -191,7 +191,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn bool + closeConn error // if set, loopyWriter will exit, resulting in conn closure } func (*goAway) isTransportResponseFrame() bool { return false } @@ -209,6 +209,14 @@ type outFlowControlSizeRequest struct { func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -408,7 +416,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - return nil, ErrConnClosing + return nil, errors.New("transport closed by client") } } } @@ -519,18 +527,9 @@ const minBatchSize = 1000 // As an optimization, to increase the batch size for each flush, loopy yields the processor, once // if the batch size is too low to give stream goroutines a chance to fill it up. func (l *loopyWriter) run() (err error) { - defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. When the connection is closed by some other known issue. - // 2. User closed the connection. - // 3. A graceful close of connection. 
- if logger.V(logLevel) { - logger.Infof("transport: loopyWriter.run returning. %v", err) - } - err = nil - } - }() + // Always flush the writer before exiting in case there are pending frames + // to be sent. + defer l.framer.writer.Flush() for { it, err := l.cbuf.get(true) if err != nil { @@ -574,7 +573,6 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata - } } } @@ -655,19 +653,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { itl: &itemList{}, wq: h.wq, } - str.itl.enqueue(h) - return l.originateStream(str) + return l.originateStream(str, h) } -func (l *loopyWriter) originateStream(str *outStream) error { - hdr := str.itl.dequeue().(*headerFrame) - if err := hdr.initStream(str.id); err != nil { - if err == ErrConnClosing { - return err - } - // Other errors(errStreamDrain) need not close transport. +func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { + // l.draining is set when handling GoAway. In which case, we want to avoid + // creating new streams. + if l.draining { + // TODO: provide a better error with the reason we are in draining. + hdr.onOrphaned(errStreamDrain) return nil } + if err := hdr.initStream(str.id); err != nil { + return err + } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err } @@ -763,8 +762,8 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { return err } } - if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing + if l.draining && len(l.estdStreams) == 0 { + return errors.New("finished processing active streams while in draining mode") } return nil } @@ -799,7 +798,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - return ErrConnClosing + return errors.New("received GOAWAY with no active streams") } } return nil @@ -817,6 +816,13 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } +func (l *loopyWriter) closeConnectionHandler() error { + // Exit loopyWriter entirely by returning an error here. This will lead to + // the transport closing the connection, and, ultimately, transport + // closure. + return ErrConnClosing +} + func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: @@ -845,6 +851,8 @@ func (l *loopyWriter) handle(i interface{}) error { return l.goAwayHandler(i) case *outFlowControlSizeRequest: return l.outFlowControlSizeRequestHandler(i) + case closeConnection: + return l.closeConnectionHandler() default: return fmt.Errorf("transport: unknown control message type %T", i) } diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go index 9fa306b2..bc8ee074 100644 --- a/vendor/google.golang.org/grpc/internal/transport/defaults.go +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -47,3 +47,9 @@ const ( defaultClientMaxHeaderListSize = uint32(16 << 20) defaultServerMaxHeaderListSize = uint32(16 << 20) ) + +// MaxStreamID is the upper bound for the stream ID before the current +// transport gracefully closes and new transport is created for subsequent RPCs. +// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit +// integer. It's exported so that tests can override it. 
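Editor's note: in controlbuf.go above, goAway.closeConn changes from bool to error and a closeConnection control message is added. Queuing it makes loopyWriter.run return ErrConnClosing, and the new deferred framer flush guarantees pending frames (a GOAWAY in particular) hit the wire before the connection is torn down. The dispatch, reduced to a skeleton mirroring the handle/closeConnectionHandler pair:

func (l *loopyWriter) handle(i interface{}) error {
	switch i.(type) {
	case closeConnection:
		// A non-nil return exits run(); its deferred
		// l.framer.writer.Flush() runs on the way out.
		return ErrConnClosing
	}
	// ... other control messages ...
	return nil
}

// Callers request an orderly teardown with: t.controlBuf.put(closeConnection{})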
+var MaxStreamID = uint32(math.MaxInt32 * 3 / 4) diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index fb272235..e6626bf9 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -46,24 +46,32 @@ import ( "google.golang.org/grpc/status" ) -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? we did before contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - return nil, errors.New("invalid gRPC request content-type") + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusUnsupportedMediaType) + return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) } st := &serverHandlerTransport{ @@ -79,7 +87,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + msg := fmt.Sprintf("malformed grpc-timeout: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } st.timeoutSet = true st.timeout = to @@ -97,7 +107,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } metakv = append(metakv, k, v) } @@ -141,12 +153,15 @@ type serverHandlerTransport struct { stats []stats.Handler } -func (ht *serverHandlerTransport) Close() { - ht.closeOnce.Do(ht.closeCloseChanOnce) +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if logger.V(logLevel) { + logger.Infof("Closing serverHandlerTransport: %v", err) + } + close(ht.closedCh) + }) } -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } // strAddr is a net.Addr backed by either a 
TCP "ip:port" string, or @@ -236,7 +251,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) } } - ht.Close() + ht.Close(errors.New("finished writing status")) return err } @@ -346,7 +361,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace case <-ht.req.Context().Done(): } cancel() - ht.Close() + ht.Close(errors.New("request is done processing")) }() req := ht.req diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index d518b07e..79ee8aea 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -59,11 +59,15 @@ var clientConnectionCounter uint64 // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + // address contains the resolver returned address for this transport. + // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. + address resolver.Address md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter @@ -136,8 +140,7 @@ type http2Client struct { channelzID *channelz.Identifier czData *channelzData - onGoAway func(GoAwayReason) - onClose func() + onClose func(GoAwayReason) bufferPool *bufferPool @@ -193,7 +196,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -213,7 +216,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } - return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) } // Any further errors will close the underlying connection @@ -238,8 +241,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func(conn net.Conn) { defer ctxMonitorDone.Fire() // Signal this goroutine has exited. <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. - if connectCtx.Err() != nil { + if err := connectCtx.Err(); err != nil { // connectCtx expired before exiting the function. Hard close the connection. 
+ if logger.V(logLevel) { + logger.Infof("newClientTransport: aborting due to connectCtx: %v", err) + } conn.Close() } }(conn) @@ -314,6 +320,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts cancel: cancel, userAgent: opts.UserAgent, registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -335,7 +342,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), - onGoAway: onGoAway, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), onClose: onClose, @@ -440,10 +446,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() - if err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } // Do not close the transport. Let reader goroutine handle it since // there might be data in the buffers. @@ -702,6 +706,18 @@ func (e NewStreamError) Error() string { // streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} @@ -726,15 +742,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, endStream: false, initStream: func(id uint32) error { t.mu.Lock() - if state := t.state; state != reachable { + // TODO: handle transport closure in loopy instead and remove this + // initStream is never called when transport is draining. + if t.state == closing { t.mu.Unlock() - // Do a quick cleanup. - err := error(errStreamDrain) - if state == closing { - err = ErrConnClosing - } - cleanup(err) - return err + cleanup(ErrConnClosing) + return ErrConnClosing } if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) @@ -752,6 +765,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, } firstTry := true var ch chan struct{} + transportDrainRequired := false checkForStreamQuota := func(it interface{}) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { @@ -767,6 +781,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, h := it.(*headerFrame) h.streamID = t.nextID t.nextID += 2 + + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. 
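Editor's note: the NewStream block above gives the resolver-returned address's ServerName precedence over CallHdr.Host when forming the :authority header, so :authority matches the name already used for TLS server authentication. Note the copy-on-write: CallHdr may be shared, so it is cloned before the override rather than mutated in place:

// Shape of the override (t.address is the new field on http2Client):
if t.address.ServerName != "" {
	newCallHdr := *callHdr // shallow copy; the caller's CallHdr stays untouched
	newCallHdr.Host = t.address.ServerName
	callHdr = &newCallHdr
}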
+ transportDrainRequired = t.nextID > MaxStreamID + s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.mu.Lock() @@ -846,6 +865,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, sh.HandleRPC(s.ctx, outHeader) } } + if transportDrainRequired { + if logger.V(logLevel) { + logger.Infof("transport: t.nextID > MaxStreamID. Draining") + } + t.GracefulClose() + } return s, nil } @@ -934,9 +959,14 @@ func (t *http2Client) Close(err error) { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } // Call t.onClose ASAP to prevent the client from attempting to create new // streams. - t.onClose() + if t.state != draining { + t.onClose(GoAwayInvalid) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -986,11 +1016,15 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: GracefulClose called") + } + t.onClose(GoAwayInvalid) t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close(ErrConnClosing) + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) return } t.controlBuf.put(&incomingGoAway{}) @@ -1148,7 +1182,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { if logger.V(logLevel) { - logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error: %v", f.ErrCode) } statusCode = codes.Unknown } @@ -1266,8 +1300,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. - t.onGoAway(t.goAwayReason) - t.state = draining + if t.state != draining { + t.onClose(t.goAwayReason) + t.state = draining + } } // All streams with IDs greater than the GoAwayId // and smaller than the previous GoAway ID should be killed. @@ -1756,3 +1792,9 @@ func (t *http2Client) getOutFlowWindow() int64 { return -2 } } + +func (t *http2Client) stateForTesting() transportState { + t.mu.Lock() + defer t.mu.Unlock() + return t.state +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 3dd15647..bc3da706 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,6 +21,7 @@ package transport import ( "bytes" "context" + "errors" "fmt" "io" "math" @@ -41,6 +42,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -101,13 +103,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when Drain() is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. 
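Editor's note: together, MaxStreamID in defaults.go and the transportDrainRequired logic above turn stream-ID exhaustion from a hard failure into a planned drain: once the next client stream ID would pass roughly 75% of the 31-bit ID space, the transport is GracefulClose()d and later RPCs get a fresh connection. The arithmetic, for reference:

// Client stream IDs are odd 31-bit values, advancing by 2 per stream.
// math.MaxInt32 = 2^31 - 1           = 2147483647
// MaxStreamID   = 2147483647 * 3 / 4 = 1610612735 (integer division)
// So a transport serves about 805 million streams before draining.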
- // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. @@ -293,7 +295,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, defer func() { if err != nil { - t.Close() + t.Close(err) } }() @@ -331,10 +333,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, go func() { t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + err := t.loopy.run() + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } t.conn.Close() t.controlBuf.finish() @@ -344,8 +345,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, return t, nil } -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -361,15 +363,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeFrameSize, onWrite: func() {}, }) - return false + return nil } if streamID%2 != 1 || streamID <= t.maxStreamID { // illegal gRPC stream id. - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - } - return true + return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) } t.maxStreamID = streamID @@ -381,13 +380,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( fc: &inFlow{limit: uint32(t.initialWindowSize)}, } var ( - // If a gRPC Response-Headers has already been received, then it means - // that the peer is speaking gRPC and we are in gRPC mode. 
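Editor's note: http2_server.go swaps the hand-rolled drainChan for the existing internal grpcsync.Event type (field above; the Fire and Done call sites follow below). Event's Fire is an idempotent one-shot, so the "only initialize and close once" bookkeeping the old comment described comes for free. A minimal sketch of the same idea; the real grpcsync.Event also exposes HasFired:

package main

import "sync/atomic"

// event is an idempotent one-shot signal, like internal/grpcsync.Event.
type event struct {
	fired int32
	c     chan struct{}
}

func newEvent() *event { return &event{c: make(chan struct{})} }

// fire returns true only on the first call; repeats are safe no-ops,
// unlike close() on a bare channel, which panics when repeated.
func (e *event) fire() bool {
	if atomic.CompareAndSwapInt32(&e.fired, 0, 1) {
		close(e.c)
		return true
	}
	return false
}

func (e *event) done() <-chan struct{} { return e.c }

func main() {
	e := newEvent()
	e.fire()
	<-e.done() // returns immediately once fired
	e.fire()   // no-op, no panic
}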
- isGRPC = false - mdata = make(map[string][]string) - httpMethod string - // headerError is set if an error is encountered while parsing the headers - headerError bool + // if false, content-type was missing or invalid + isGRPC = false + contentType = "" + mdata = make(map[string][]string) + httpMethod string + // these are set if an error is encountered while parsing the headers + protocolError bool + headerError *status.Status timeoutSet bool timeout time.Duration @@ -398,6 +398,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( case "content-type": contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) if !validContentType { + contentType = hf.Value break } mdata[hf.Name] = append(mdata[hf.Name], hf.Value) @@ -413,7 +414,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( timeoutSet = true var err error if timeout, err = decodeTimeout(hf.Value); err != nil { - headerError = true + headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) } // "Transports must consider requests containing the Connection header // as malformed." - A41 @@ -421,14 +422,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if logger.V(logLevel) { logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") } - headerError = true + protocolError = true default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break } v, err := decodeMetadataHeader(hf.Name, hf.Value) if err != nil { - headerError = true + headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) break } @@ -447,23 +448,43 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( logger.Errorf("transport: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 400, + httpStatus: http.StatusBadRequest, streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), rst: !frame.StreamEnded(), }) - return false + return nil } - if !isGRPC || headerError { + if protocolError { t.controlBuf.put(&cleanupStream{ streamID: streamID, rst: true, rstCode: http2.ErrCodeProtocol, onWrite: func() {}, }) - return false + return nil + } + if !isGRPC { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusUnsupportedMediaType, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), + rst: !frame.StreamEnded(), + }) + return nil + } + if headerError != nil { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: headerError, + rst: !frame.StreamEnded(), + }) + return nil } // "If :authority is missing, Host must be renamed to :authority." 
- A41 @@ -503,7 +524,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if t.state != reachable { t.mu.Unlock() s.cancel() - return false + return nil } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -514,7 +535,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( onWrite: func() {}, }) s.cancel() - return false + return nil } if httpMethod != http.MethodPost { t.mu.Unlock() @@ -530,7 +551,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rst: !frame.StreamEnded(), }) s.cancel() - return false + return nil } if t.inTapHandle != nil { var err error @@ -550,7 +571,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( status: stat, rst: !frame.StreamEnded(), }) - return false + return nil } } t.activeStreams[streamID] = s @@ -597,7 +618,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return false + return nil } // HandleStreams receives incoming streams using the given handler. This is @@ -630,19 +651,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. continue } if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() + t.Close(err) return } - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) - } - t.Close() + t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle, traceCtx) { - t.Close() + if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + t.Close(err) break } case *http2.DataFrame: @@ -843,8 +861,8 @@ const ( func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() return } // Maybe it's a BDP ping. @@ -886,10 +904,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - if logger.V(logLevel) { - logger.Errorf("transport: Got too many pings from the client, closing the connection.") - } - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) } } @@ -1153,7 +1168,7 @@ func (t *http2Server) keepalive() { if logger.V(logLevel) { logger.Infof("transport: closing server transport due to maximum connection age.") } - t.Close() + t.controlBuf.put(closeConnection{}) case <-t.done: } return @@ -1169,10 +1184,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to idleness.") - } - t.Close() + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) return } if !outstandingPing { @@ -1199,12 +1211,15 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. 
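Editor's note: operateHeaders above now separates what used to be a single headerError bool into distinct failure classes, and only genuinely fatal conditions close the transport through the new error return:

// Failure routing in the reworked operateHeaders (summary):
//   oversized header frame        -> RST_STREAM(FRAME_SIZE_ERROR), transport survives
//   :connection header present    -> RST_STREAM(PROTOCOL_ERROR),   transport survives
//   missing/invalid content-type  -> earlyAbortStream, HTTP 415, codes.InvalidArgument
//   bad grpc-timeout or metadata  -> earlyAbortStream, HTTP 400, codes.Internal status
//   illegal stream ID             -> non-nil return, caller does t.Close(err)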
-func (t *http2Server) Close() { +func (t *http2Server) Close(err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -1295,10 +1310,10 @@ func (t *http2Server) RemoteAddr() net.Addr { func (t *http2Server) Drain() { t.mu.Lock() defer t.mu.Unlock() - if t.drainChan != nil { + if t.drainEvent != nil { return } - t.drainChan = make(chan struct{}) + t.drainEvent = grpcsync.NewEvent() t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } @@ -1319,19 +1334,20 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // Stop accepting more streams now. t.state = draining sid := t.maxStreamID + retErr := g.closeConn if len(t.activeStreams) == 0 { - g.closeConn = true + retErr = errors.New("second GOAWAY written and no active streams left to process") } t.mu.Unlock() t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if g.closeConn { + if retErr != nil { // Abruptly close the connection following the GoAway (via // loopywriter). But flush out what's inside the buffer first. t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") + return false, retErr } return true, nil } @@ -1353,7 +1369,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { timer := time.NewTimer(time.Minute) defer timer.Stop() select { - case <-t.drainChan: + case <-t.drainEvent.Done(): case <-timer.C: case <-t.done: return diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 2e615ee2..0ac77ea4 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -583,8 +583,8 @@ type ConnectOptions struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) } // Options provides additional hints and information for message @@ -701,7 +701,7 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() + Close(err error) // RemoteAddr returns the remote network address. 
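Editor's note: two related teardown changes land here. First, keepalive's MaxConnectionAge path now enqueues closeConnection{} through the control buffer instead of calling t.Close() directly, so frames already queued ahead of it are flushed first. Second, ServerTransport.Close now takes the closing reason as an error, which each call site threads through and the transport logs. A hypothetical caller under the updated interface:

// Hypothetical shutdown path against the new Close(err) signature.
func shutdown(st transport.ServerTransport) {
	st.Close(errors.New("draining for maintenance")) // reason surfaces in transport logs
}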
RemoteAddr() net.Addr diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index a5d5516e..c525dc07 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -58,12 +58,18 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Unlock() } -func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { +// doneChannelzWrapper performs the following: +// - increments the calls started channelz counter +// - wraps the done function in the passed in result to increment the calls +// failed or calls succeeded channelz counter before invoking the actual +// done function. +func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) { acw.mu.Lock() ac := acw.ac acw.mu.Unlock() ac.incrCallsStarted() - return func(b balancer.DoneInfo) { + done := result.Done + result.Done = func(b balancer.DoneInfo) { if b.Err != nil && b.Err != io.EOF { ac.incrCallsFailed() } else { @@ -82,7 +88,7 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { var ch chan struct{} var lastPickErr error @@ -90,7 +96,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. pw.mu.Lock() if pw.done { pw.mu.Unlock() - return nil, nil, ErrClientConnClosing + return nil, balancer.PickResult{}, ErrClientConnClosing } if pw.picker == nil { @@ -111,9 +117,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } switch ctx.Err() { case context.DeadlineExceeded: - return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, nil, status.Error(codes.Canceled, errStr) + return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) } case <-ch: } @@ -125,7 +131,6 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. pw.mu.Unlock() pickResult, err := p.Pick(info) - if err != nil { if err == balancer.ErrNoSubConnAvailable { continue @@ -136,7 +141,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if istatus.IsRestrictedControlPlaneCode(st) { err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) } - return nil, nil, dropError{error: err} + return nil, balancer.PickResult{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -144,7 +149,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, nil, status.Error(codes.Unavailable, err.Error()) + return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } acw, ok := pickResult.SubConn.(*acBalancerWrapper) @@ -154,9 +159,10 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
} if t := acw.getAddrConn().getReadyTransport(); t != nil { if channelz.IsOn() { - return t, doneChannelzWrapper(acw, pickResult.Done), nil + doneChannelzWrapper(acw, &pickResult) + return t, pickResult, nil } - return t, pickResult.Done, nil + return t, pickResult, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index fb7a99e0..fc91b4d2 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -51,7 +51,7 @@ type pickfirstBalancer struct { func (b *pickfirstBalancer) ResolverError(err error) { if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) + logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) } if b.subConn == nil { b.state = connectivity.TransientFailure @@ -102,8 +102,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState b.subConn = subConn b.state = connectivity.Idle b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Idle, - Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) b.subConn.Connect() return nil diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index c22f9a52..ee4b04ca 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -1,4 +1,4 @@ -// Copyright 2016 gRPC authors. +// Copyright 2016 The gRPC Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,19 +11,20 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - // Service exported by server reflection +// Warning: this entire file is deprecated. Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 -// source: reflection/grpc_reflection_v1alpha/reflection.proto +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. package grpc_reflection_v1alpha import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -37,10 +38,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The message sent by the client when calling ServerReflectionInfo method. 
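// Aside (a sketch, not part of this patch): the pickfirst hunk above now
// reports Connecting together with a picker that returns
// balancer.ErrNoSubConnAvailable, so new RPCs queue on the pick while the
// SubConn dials instead of being offered a connection that has only just
// started connecting. A minimal picker of that shape, with a hypothetical
// name:
//
//	type queuingPicker struct{}
//
//	func (queuingPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
//		// gRPC re-picks once the balancer publishes a new state.
//		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
//	}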
type ServerReflectionRequest struct { state protoimpl.MessageState @@ -65,7 +62,7 @@ type ServerReflectionRequest struct { func (x *ServerReflectionRequest) Reset() { *x = ServerReflectionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -78,7 +75,7 @@ func (x *ServerReflectionRequest) String() string { func (*ServerReflectionRequest) ProtoMessage() {} func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -91,7 +88,7 @@ func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} } func (x *ServerReflectionRequest) GetHost() string { @@ -209,7 +206,7 @@ type ExtensionRequest struct { func (x *ExtensionRequest) Reset() { *x = ExtensionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -222,7 +219,7 @@ func (x *ExtensionRequest) String() string { func (*ExtensionRequest) ProtoMessage() {} func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -235,7 +232,7 @@ func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. func (*ExtensionRequest) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} } func (x *ExtensionRequest) GetContainingType() string { @@ -260,8 +257,8 @@ type ServerReflectionResponse struct { ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` - // The server sets one of the following fields according to the - // message_request in the request. + // The server sets one of the following fields according to the message_request + // in the request.
// // Types that are assignable to MessageResponse: // @@ -275,7 +272,7 @@ type ServerReflectionResponse struct { func (x *ServerReflectionResponse) Reset() { *x = ServerReflectionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -288,7 +285,7 @@ func (x *ServerReflectionResponse) String() string { func (*ServerReflectionResponse) ProtoMessage() {} func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -301,7 +298,7 @@ func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} } func (x *ServerReflectionResponse) GetValidHost() string { @@ -359,8 +356,8 @@ type isServerReflectionResponse_MessageResponse interface { type ServerReflectionResponse_FileDescriptorResponse struct { // This message is used to answer file_by_filename, file_containing_symbol, - // file_containing_extension requests with transitive dependencies. - // As the repeated label is not allowed in oneof fields, we use a + // file_containing_extension requests with transitive dependencies. As + // the repeated label is not allowed in oneof fields, we use a // FileDescriptorResponse message to encapsulate the repeated fields. // The reflection service is allowed to avoid sending FileDescriptorProtos // that were previously sent in response to earlier requests in the stream. @@ -368,12 +365,12 @@ type ServerReflectionResponse_FileDescriptorResponse struct { } type ServerReflectionResponse_AllExtensionNumbersResponse struct { - // This message is used to answer all_extension_numbers_of_type requests. + // This message is used to answer all_extension_numbers_of_type request. AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` } type ServerReflectionResponse_ListServicesResponse struct { - // This message is used to answer list_services requests. + // This message is used to answer list_services request.
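// Aside (a sketch, not part of this patch): callers of the generated API
// consume this oneof by type-switching on the wrapper structs declared here.
// resp is a hypothetical *ServerReflectionResponse received from the stream:
//
//	switch m := resp.MessageResponse.(type) {
//	case *ServerReflectionResponse_ListServicesResponse:
//		for _, svc := range m.ListServicesResponse.Service {
//			fmt.Println(svc.Name)
//		}
//	case *ServerReflectionResponse_ErrorResponse:
//		fmt.Println("reflection error:", m.ErrorResponse.ErrorMessage)
//	}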
ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` } @@ -409,7 +406,7 @@ type FileDescriptorResponse struct { func (x *FileDescriptorResponse) Reset() { *x = FileDescriptorResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -422,7 +419,7 @@ func (x *FileDescriptorResponse) String() string { func (*FileDescriptorResponse) ProtoMessage() {} func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -435,7 +432,7 @@ func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} } func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { @@ -461,7 +458,7 @@ type ExtensionNumberResponse struct { func (x *ExtensionNumberResponse) Reset() { *x = ExtensionNumberResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -474,7 +471,7 @@ func (x *ExtensionNumberResponse) String() string { func (*ExtensionNumberResponse) ProtoMessage() {} func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -487,7 +484,7 @@ func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. 
func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} } func (x *ExtensionNumberResponse) GetBaseTypeName() string { @@ -518,7 +515,7 @@ type ListServiceResponse struct { func (x *ListServiceResponse) Reset() { *x = ListServiceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -531,7 +528,7 @@ func (x *ListServiceResponse) String() string { func (*ListServiceResponse) ProtoMessage() {} func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -544,7 +541,7 @@ func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. func (*ListServiceResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} } func (x *ListServiceResponse) GetService() []*ServiceResponse { @@ -569,7 +566,7 @@ type ServiceResponse struct { func (x *ServiceResponse) Reset() { *x = ServiceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -582,7 +579,7 @@ func (x *ServiceResponse) String() string { func (*ServiceResponse) ProtoMessage() {} func (x *ServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -595,7 +592,7 @@ func (x *ServiceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. 
func (*ServiceResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} } func (x *ServiceResponse) GetName() string { @@ -619,7 +616,7 @@ type ErrorResponse struct { func (x *ErrorResponse) Reset() { *x = ErrorResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -632,7 +629,7 @@ func (x *ErrorResponse) String() string { func (*ErrorResponse) ProtoMessage() {} func (x *ErrorResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -645,7 +642,7 @@ func (x *ErrorResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. func (*ErrorResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} } func (x *ErrorResponse) GetErrorCode() int32 { @@ -662,136 +659,139 @@ func (x *ErrorResponse) GetErrorMessage() string { return "" } -var File_reflection_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor - -var file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ - 0x0a, 0x33, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, - 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x22, 0xf8, - 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, - 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, - 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, - 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, - 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, - 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, - 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 
0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, - 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, - 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, - 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, - 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, - 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x5b, 0x0a, - 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, - 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x6b, 0x0a, 0x18, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, +var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x22, 0xf8, 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x12, 0x0a, 
0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, + 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, + 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, + 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, - 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, - 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, - 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x6b, 0x0a, 0x18, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, + 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, + 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x42, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 
0x61, 0x67, + 0x65, 0x32, 0x93, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, - 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, - 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x07, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0x93, 0x01, 0x0a, - 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x72, 
0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x67, 0x72, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x73, 0x0a, 0x1a, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, - 0x30, 0x01, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xb8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_grpc_reflection_v1alpha_reflection_proto_rawDesc ) -func file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData) +func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1alpha_reflection_proto_rawDescData) }) - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData + return file_grpc_reflection_v1alpha_reflection_proto_rawDescData } -var file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var 
file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ +var file_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest (*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse @@ -801,7 +801,7 @@ var file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = []interfa (*ServiceResponse)(nil), // 6: grpc.reflection.v1alpha.ServiceResponse (*ErrorResponse)(nil), // 7: grpc.reflection.v1alpha.ErrorResponse } -var file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ +var file_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ 1, // 0: grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1alpha.ExtensionRequest 0, // 1: grpc.reflection.v1alpha.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1alpha.ServerReflectionRequest 3, // 2: grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1alpha.FileDescriptorResponse @@ -818,13 +818,13 @@ var file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ 0, // [0:7] is the sub-list for field type_name } -func init() { file_reflection_grpc_reflection_v1alpha_reflection_proto_init() } -func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { - if File_reflection_grpc_reflection_v1alpha_reflection_proto != nil { +func init() { file_grpc_reflection_v1alpha_reflection_proto_init() } +func file_grpc_reflection_v1alpha_reflection_proto_init() { + if File_grpc_reflection_v1alpha_reflection_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServerReflectionRequest); i { case 0: return &v.state @@ -836,7 +836,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionRequest); i { case 0: return &v.state @@ -848,7 +848,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServerReflectionResponse); i { case 0: return &v.state @@ -860,7 +860,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FileDescriptorResponse); i { case 0: return &v.state @@ -872,7 +872,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - 
file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionNumberResponse); i { case 0: return &v.state @@ -884,7 +884,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListServiceResponse); i { case 0: return &v.state @@ -896,7 +896,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceResponse); i { case 0: return &v.state @@ -908,7 +908,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ErrorResponse); i { case 0: return &v.state @@ -921,14 +921,14 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { } } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), (*ServerReflectionRequest_FileContainingExtension)(nil), (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), (*ServerReflectionRequest_ListServices)(nil), } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ (*ServerReflectionResponse_FileDescriptorResponse)(nil), (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), (*ServerReflectionResponse_ListServicesResponse)(nil), @@ -938,18 +938,18 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc, + RawDescriptor: file_grpc_reflection_v1alpha_reflection_proto_rawDesc, NumEnums: 0, NumMessages: 8, NumExtensions: 0, NumServices: 1, }, - GoTypes: file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes, - DependencyIndexes: file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs, - MessageInfos: file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes, + GoTypes: file_grpc_reflection_v1alpha_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1alpha_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1alpha_reflection_proto_msgTypes, }.Build() - File_reflection_grpc_reflection_v1alpha_reflection_proto = out.File - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil - file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = nil - 
file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil + File_grpc_reflection_v1alpha_reflection_proto = out.File + file_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil + file_grpc_reflection_v1alpha_reflection_proto_goTypes = nil + file_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto deleted file mode 100644 index ee2b82c0..00000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2016 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Service exported by server reflection - -syntax = "proto3"; - -option go_package = "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"; - -package grpc.reflection.v1alpha; - -service ServerReflection { - // The reflection service is structured as a bidirectional stream, ensuring - // all related requests go to a single server. - rpc ServerReflectionInfo(stream ServerReflectionRequest) - returns (stream ServerReflectionResponse); -} - -// The message sent by the client when calling ServerReflectionInfo method. -message ServerReflectionRequest { - string host = 1; - // To use reflection service, the client should set one of the following - // fields in message_request. The server distinguishes requests by their - // defined field and then handles them using corresponding methods. - oneof message_request { - // Find a proto file by the file name. - string file_by_filename = 3; - - // Find the proto file that declares the given fully-qualified symbol name. - // This field should be a fully-qualified symbol name - // (e.g. .[.] or .). - string file_containing_symbol = 4; - - // Find the proto file which defines an extension extending the given - // message type with the given field number. - ExtensionRequest file_containing_extension = 5; - - // Finds the tag numbers used by all known extensions of extendee_type, and - // appends them to ExtensionNumberResponse in an undefined order. - // Its corresponding method is best-effort: it's not guaranteed that the - // reflection service will implement this method, and it's not guaranteed - // that this method will provide all extensions. Returns - // StatusCode::UNIMPLEMENTED if it's not implemented. - // This field should be a fully-qualified type name. The format is - // . - string all_extension_numbers_of_type = 6; - - // List the full names of registered services. The content will not be - // checked. - string list_services = 7; - } -} - -// The type name and extension number sent by the client when requesting -// file_containing_extension. -message ExtensionRequest { - // Fully-qualified type name. The format should be . - string containing_type = 1; - int32 extension_number = 2; -} - -// The message sent by the server to answer ServerReflectionInfo method. 
-message ServerReflectionResponse { - string valid_host = 1; - ServerReflectionRequest original_request = 2; - // The server sets one of the following fields according to the - // message_request in the request. - oneof message_response { - // This message is used to answer file_by_filename, file_containing_symbol, - // file_containing_extension requests with transitive dependencies. - // As the repeated label is not allowed in oneof fields, we use a - // FileDescriptorResponse message to encapsulate the repeated fields. - // The reflection service is allowed to avoid sending FileDescriptorProtos - // that were previously sent in response to earlier requests in the stream. - FileDescriptorResponse file_descriptor_response = 4; - - // This message is used to answer all_extension_numbers_of_type requests. - ExtensionNumberResponse all_extension_numbers_response = 5; - - // This message is used to answer list_services requests. - ListServiceResponse list_services_response = 6; - - // This message is used when an error occurs. - ErrorResponse error_response = 7; - } -} - -// Serialized FileDescriptorProto messages sent by the server answering -// a file_by_filename, file_containing_symbol, or file_containing_extension -// request. -message FileDescriptorResponse { - // Serialized FileDescriptorProto messages. We avoid taking a dependency on - // descriptor.proto, which uses proto2 only features, by making them opaque - // bytes instead. - repeated bytes file_descriptor_proto = 1; -} - -// A list of extension numbers sent by the server answering -// all_extension_numbers_of_type request. -message ExtensionNumberResponse { - // Full name of the base type, including the package name. The format - // is . - string base_type_name = 1; - repeated int32 extension_number = 2; -} - -// A list of ServiceResponse sent by the server answering list_services request. -message ListServiceResponse { - // The information of each service may be expanded in the future, so we use - // ServiceResponse message to encapsulate it. - repeated ServiceResponse service = 1; -} - -// The information of a single service used by ListServiceResponse to answer -// list_services request. -message ServiceResponse { - // Full name of a registered service, including its package name. The format - // is . - string name = 1; -} - -// The error code and error message sent by the server when an error occurs. -message ErrorResponse { - // This field uses the error codes defined in grpc::StatusCode. - int32 error_code = 1; - string error_message = 2; -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index b8e76a87..ed54ab13 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -1,4 +1,4 @@ -// Copyright 2016 gRPC authors. +// Copyright 2016 The gRPC Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,14 +11,16 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - // Service exported by server reflection +// Warning: this entire file is deprecated. 
Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 -// source: reflection/grpc_reflection_v1alpha/reflection.proto +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. package grpc_reflection_v1alpha @@ -151,5 +153,5 @@ var ServerReflection_ServiceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, - Metadata: "reflection/grpc_reflection_v1alpha/reflection.proto", + Metadata: "grpc/reflection/v1alpha/reflection.proto", } diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index 0b41783a..e2f9ebfb 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -42,12 +42,14 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" - rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" + + v1alphagrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" ) // GRPCServer is the interface provided by a gRPC server. It is implemented by @@ -63,7 +65,7 @@ var _ GRPCServer = (*grpc.Server)(nil) // Register registers the server reflection service on the given gRPC server. func Register(s GRPCServer) { svr := NewServer(ServerOptions{Services: s}) - rpb.RegisterServerReflectionServer(s, svr) + v1alphagrpc.RegisterServerReflectionServer(s, svr) } // ServiceInfoProvider is an interface used to retrieve metadata about the @@ -124,7 +126,7 @@ type ServerOptions struct { // // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. -func NewServer(opts ServerOptions) rpb.ServerReflectionServer { +func NewServer(opts ServerOptions) v1alphagrpc.ServerReflectionServer { if opts.DescriptorResolver == nil { opts.DescriptorResolver = protoregistry.GlobalFiles } @@ -139,7 +141,7 @@ func NewServer(opts ServerOptions) rpb.ServerReflectionServer { } type serverReflectionServer struct { - rpb.UnimplementedServerReflectionServer + v1alphagrpc.UnimplementedServerReflectionServer s ServiceInfoProvider descResolver protodesc.Resolver extResolver ExtensionResolver @@ -213,11 +215,11 @@ func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([] } // listServices returns the names of services this server exposes. -func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse { +func (s *serverReflectionServer) listServices() []*v1alphapb.ServiceResponse { serviceInfo := s.s.GetServiceInfo() - resp := make([]*rpb.ServiceResponse, 0, len(serviceInfo)) + resp := make([]*v1alphapb.ServiceResponse, 0, len(serviceInfo)) for svc := range serviceInfo { - resp = append(resp, &rpb.ServiceResponse{Name: svc}) + resp = append(resp, &v1alphapb.ServiceResponse{Name: svc}) } sort.Slice(resp, func(i, j int) bool { return resp[i].Name < resp[j].Name @@ -226,7 +228,7 @@ func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse { } // ServerReflectionInfo is the reflection service handler. 
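// Aside (a sketch, not part of this patch): the rpb -> v1alphagrpc/v1alphapb
// alias split above is internal to this package; applications still register
// reflection through the same public entry point. A minimal server, with
// hypothetical listener details:
//
//	s := grpc.NewServer()
//	reflection.Register(s) // serves the (now-deprecated) v1alpha service
//	lis, err := net.Listen("tcp", "localhost:50051")
//	if err != nil {
//		log.Fatalf("listen: %v", err)
//	}
//	_ = s.Serve(lis)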
-func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error { +func (s *serverReflectionServer) ServerReflectionInfo(stream v1alphagrpc.ServerReflection_ServerReflectionInfoServer) error { sentFileDescriptors := make(map[string]bool) for { in, err := stream.Recv() @@ -237,79 +239,79 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio return err } - out := &rpb.ServerReflectionResponse{ + out := &v1alphapb.ServerReflectionResponse{ ValidHost: in.Host, OriginalRequest: in, } switch req := in.MessageRequest.(type) { - case *rpb.ServerReflectionRequest_FileByFilename: + case *v1alphapb.ServerReflectionRequest_FileByFilename: var b [][]byte fd, err := s.descResolver.FindFileByPath(req.FileByFilename) if err == nil { b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) } if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphapb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *rpb.ServerReflectionRequest_FileContainingSymbol: + case *v1alphapb.ServerReflectionRequest_FileContainingSymbol: b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphapb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *rpb.ServerReflectionRequest_FileContainingExtension: + case *v1alphapb.ServerReflectionRequest_FileContainingExtension: typeName := req.FileContainingExtension.ContainingType extNum := req.FileContainingExtension.ExtensionNumber b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphapb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType: + case *v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType: extNums, err := 
s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphapb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{ + out.MessageResponse = &v1alphapb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1alphapb.ExtensionNumberResponse{ BaseTypeName: req.AllExtensionNumbersOfType, ExtensionNumber: extNums, }, } } - case *rpb.ServerReflectionRequest_ListServices: - out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &rpb.ListServiceResponse{ + case *v1alphapb.ServerReflectionRequest_ListServices: + out.MessageResponse = &v1alphapb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1alphapb.ListServiceResponse{ Service: s.listServices(), }, } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index 99db79fa..a6f26c8a 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -57,7 +57,8 @@ LEGACY_SOURCES=( ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto profiling/proto/service.proto - reflection/grpc_reflection_v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto ) # Generates only the new gRPC Service symbols @@ -119,8 +120,4 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # see grpc_testing_not_regenerate/README.md for details. rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go -# grpc/testing does not have a go_package option. -mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ -mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ - cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 967cbc73..654e9ce6 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -24,6 +24,7 @@ import ( "context" "net" "net/url" + "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" @@ -247,9 +248,6 @@ type Target struct { Scheme string // Deprecated: use URL.Host instead. Authority string - // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when - // the former is empty. - Endpoint string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. Any query params specified in the original dial @@ -257,6 +255,24 @@ type Target struct { URL url.URL } +// Endpoint retrieves endpoint without leading "/" from either `URL.Path` +// or `URL.Opaque`. The latter is used when the former is empty. 
+func (t Target) Endpoint() string { + endpoint := t.URL.Path + if endpoint == "" { + endpoint = t.URL.Opaque + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field. + return strings.TrimPrefix(endpoint, "/") +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 934fc1aa..cb7020eb 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -25,7 +25,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "math" "strings" "sync" @@ -77,7 +76,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { return &gzipCompressor{ pool: sync.Pool{ New: func() interface{} { - w, err := gzip.NewWriterLevel(ioutil.Discard, level) + w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) } @@ -143,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { z.Close() d.pool.Put(z) }() - return ioutil.ReadAll(z) + return io.ReadAll(z) } func (d *gzipDecompressor) Type() string { @@ -297,7 +296,8 @@ func (o FailFastCallOption) before(c *callInfo) error { func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can receive. +// in bytes the client can receive. If this is not set, gRPC uses the default +// 4MB. func MaxCallRecvMsgSize(bytes int) CallOption { return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } @@ -320,7 +320,8 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can send. +// in bytes the client can send. If this is not set, gRPC uses the default +// `math.MaxInt32`. func MaxCallSendMsgSize(bytes int) CallOption { return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } @@ -711,7 +712,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei d, size, err = decompress(compressor, d, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java @@ -746,7 +747,7 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize } // Read from LimitReader with limit max+1. So if the underlying // reader is over limit, the result will be bigger than max. 
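Earlier in the hunk above, resolver.Target loses its deprecated Endpoint field in favor of an Endpoint() method that prefers URL.Path, falls back to URL.Opaque, and strips one leading "/". A standalone sketch of that derivation using only the standard library; the example targets are illustrative:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// endpoint mirrors the logic of resolver.Target.Endpoint(): prefer
// URL.Path, fall back to URL.Opaque, and trim one leading "/".
func endpoint(u *url.URL) string {
	e := u.Path
	if e == "" {
		e = u.Opaque
	}
	return strings.TrimPrefix(e, "/")
}

func main() {
	// "dns://8.8.8.8/grpc.example.com" parses with a non-empty Path.
	u1, _ := url.Parse("dns://8.8.8.8/grpc.example.com")
	fmt.Println(endpoint(u1)) // grpc.example.com

	// "unix:relative/path" has no "//", so url.Parse sets Opaque, not Path.
	u2, _ := url.Parse("unix:relative/path")
	fmt.Println(endpoint(u2)) // relative/path

	// "unix:///abs/path" shows the caveat the comment above describes:
	// trimming yields "abs/path", losing the absolute path; the unix
	// resolver works around this by reading the URL field directly.
	u3, _ := url.Parse("unix:///abs/path")
	fmt.Println(endpoint(u3)) // abs/path
}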
- d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return d, len(d), err } @@ -759,7 +760,7 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf return err } if err := c.Unmarshal(d, m); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { payInfo.uncompressedBytes = d diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index f4dde72b..d5a6e78b 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -233,10 +233,11 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } -// WriteBufferSize determines how much data can be batched before doing a write on the wire. -// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. The default value for this buffer is +// 32KB. Zero or negative values will disable the write buffer such that each +// write will be on underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { @@ -244,11 +245,10 @@ func WriteBufferSize(s int) ServerOption { }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -// The default value for this buffer is 32KB. -// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. The default value for this +// buffer is 32KB. Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s @@ -942,7 +942,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { } func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() + defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup var roundRobinCounter uint32 @@ -1008,7 +1008,8 @@ var _ http.Handler = (*Server)(nil) func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. 
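The WriteBufferSize and ReadBufferSize doc updates above now state that zero or negative values disable the buffers (previously only zero was documented), and the rpc_util hunks earlier document the 4MB default receive limit on the client. A short sketch of wiring these knobs together; the address and sizes are arbitrary illustration values:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}

	// Server side: disable the write buffer entirely (zero or, after this
	// change, any negative value) and double the 32KB read buffer default.
	s := grpc.NewServer(
		grpc.WriteBufferSize(0),
		grpc.ReadBufferSize(64*1024),
	)
	defer s.Stop()
	go func() { _ = s.Serve(lis) }()

	// Client side: raise the per-call receive limit above the 4MB default.
	conn, err := grpc.Dial(lis.Addr().String(),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(16*1024*1024)),
	)
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close()
}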
return } if !s.addConn(listenerAddressForServeHTTP, st) { @@ -1046,7 +1047,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close() + st.Close(errors.New("Server.addConn called when server has already been stopped")) return false } if s.drain { @@ -1150,21 +1151,16 @@ func chainUnaryServerInterceptors(s *Server) { func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. - var state struct { - i int - next UnaryHandler - } - state.next = func(ctx context.Context, req interface{}) (interface{}, error) { - if state.i == len(interceptors)-1 { - return interceptors[state.i](ctx, req, info, handler) - } - state.i++ - return interceptors[state.i-1](ctx, req, info, state.next) - } - return state.next(ctx, req) + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1303,7 +1299,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } @@ -1470,21 +1466,16 @@ func chainStreamServerInterceptors(s *Server) { func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. 
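The chainUnaryInterceptors rewrite above (mirrored for stream interceptors just below) trades the allocation-saving state struct for a recursive getChainUnaryHandler: each level returns a handler that dispatches to the next interceptor, and the last interceptor receives the final handler directly. A self-contained sketch of the same recursion with simplified function types:

package main

import "fmt"

type handler func(req string) string
type interceptor func(req string, next handler) string

// chain mirrors getChainUnaryHandler: interceptor curr gets a handler
// that invokes interceptor curr+1, until the last interceptor, which
// is handed the final handler itself.
func chain(ics []interceptor, curr int, final handler) handler {
	if curr == len(ics)-1 {
		return final
	}
	return func(req string) string {
		return ics[curr+1](req, chain(ics, curr+1, final))
	}
}

func main() {
	ics := []interceptor{
		func(req string, next handler) string { return "a(" + next(req) + ")" },
		func(req string, next handler) string { return "b(" + next(req) + ")" },
	}
	final := func(req string) string { return req }
	// Entry point matches the rewritten chainUnaryInterceptors: call
	// interceptor 0 with the handler that chains the rest.
	fmt.Println(ics[0]("req", chain(ics, 0, final))) // a(b(req))
}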
- var state struct { - i int - next StreamHandler - } - state.next = func(srv interface{}, ss ServerStream) error { - if state.i == len(interceptors)-1 { - return interceptors[state.i](srv, ss, info, handler) - } - state.i++ - return interceptors[state.i-1](srv, ss, info, state.next) - } - return state.next(srv, ss) + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv interface{}, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1819,7 +1810,7 @@ func (s *Server) Stop() { } for _, cs := range conns { for st := range cs { - st.Close() + st.Close(errors.New("Server.Stop called")) } } if s.opts.numServerWorkers > 0 { diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 01bbb202..f22acace 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -226,7 +226,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ @@ -254,7 +254,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { } d, err := parseDuration(m.Timeout) if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } @@ -263,7 +263,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { Timeout: d, } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -283,13 +283,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { for i, n := range *m.Name { path, err := n.generatePath() if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } if _, ok := paths[path]; ok { err = errDuplicatedName - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } paths[path] = struct{}{} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 960c3e33..93231af2 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -416,7 +416,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.Scheme == "xds" { + if 
cs.cc.parsedTarget.URL.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( @@ -438,7 +438,7 @@ func (a *csAttempt) getTransport() error { cs := a.cs var err error - a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -455,6 +455,25 @@ func (a *csAttempt) getTransport() error { func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries + + // Merge metadata stored in PickResult, if any, with existing call metadata. + // It is safe to overwrite the csAttempt's context here, since all state + // maintained in it are local to the attempt. When the attempt has to be + // retried, a new instance of csAttempt will be created. + if a.pickResult.Metatada != nil { + // We currently do not have a function it the metadata package which + // merges given metadata with existing metadata in a context. Existing + // function `AppendToOutgoingContext()` takes a variadic argument of key + // value pairs. + // + // TODO: Make it possible to retrieve key value pairs from metadata.MD + // in a form passable to AppendToOutgoingContext(), or create a version + // of AppendToOutgoingContext() that accepts a metadata.MD. + md, _ := metadata.FromOutgoingContext(a.ctx) + md = metadata.Join(md, a.pickResult.Metatada) + a.ctx = metadata.NewOutgoingContext(a.ctx, md) + } + s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { nse, ok := err.(*transport.NewStreamError) @@ -529,12 +548,12 @@ type clientStream struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { - ctx context.Context - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - done func(balancer.DoneInfo) + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + pickResult balancer.PickResult finished bool dc Decompressor @@ -1103,12 +1122,12 @@ func (a *csAttempt) finish(err error) { tr = a.s.Trailer() } - if a.done != nil { + if a.pickResult.Done != nil { br := false if a.s != nil { br = a.s.BytesReceived() } - a.done(balancer.DoneInfo{ + a.pickResult.Done(balancer.DoneInfo{ Err: err, Trailer: tr, BytesSent: a.s != nil, @@ -1464,6 +1483,9 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 2198e709..fe552c31 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
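The newStream change above merges metadata carried in the pick result into the attempt's outgoing context by hand, since, as the TODO notes, the metadata package has no helper that merges an MD into a context (the Metatada spelling is the field's actual name in this release). A sketch of that merge pattern in isolation, with arbitrary example keys:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// mergeIntoOutgoing joins extra metadata with whatever is already in
// the context's outgoing metadata, mirroring the csAttempt logic.
func mergeIntoOutgoing(ctx context.Context, extra metadata.MD) context.Context {
	md, _ := metadata.FromOutgoingContext(ctx) // a nil MD joins cleanly
	md = metadata.Join(md, extra)
	return metadata.NewOutgoingContext(ctx, md)
}

func main() {
	ctx := metadata.AppendToOutgoingContext(context.Background(), "k1", "v1")
	ctx = mergeIntoOutgoing(ctx, metadata.Pairs("k2", "v2"))
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md) // map[k1:[v1] k2:[v2]]
}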
-const Version = "1.51.0" +const Version = "1.53.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index bd8e0cdb..3728aed0 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -66,6 +66,17 @@ elif [[ "$#" -ne 0 ]]; then die "Unknown argument(s): $*" fi +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + PATH="/home/travis/bin:${PATH}" make proto && \ + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +if [[ -n "${VET_ONLY_PROTO}" ]]; then + exit 0 +fi + # - Ensure all source files contain a copyright message. # (Done in two parts because Darwin "git grep" has broken support for compound # exclusion matches.) @@ -93,13 +104,6 @@ git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*. misspell -error . -# - Check that generated proto files are up to date. -if [[ -z "${VET_SKIP_PROTO}" ]]; then - PATH="/home/travis/bin:${PATH}" make proto && \ - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - # - gofmt, goimports, golint (with exceptions for generated code), go vet, # go mod tidy. # Perform these checks on each module inside gRPC. @@ -111,7 +115,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do goimports -l . 2>&1 | not grep -vE "\.pb\.go" golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy + go mod tidy -compat=1.17 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd @@ -121,8 +125,9 @@ done # # TODO(dfawley): don't use deprecated functions in examples or first-party # plugins. +# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. 
diff --git a/vendor/modules.txt b/vendor/modules.txt index 1771c6fa..dd18419c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -7,18 +7,9 @@ github.com/PuerkitoBio/urlesc # github.com/agext/levenshtein v1.2.2 ## explicit github.com/agext/levenshtein -# github.com/apparentlymart/go-cidr v1.1.0 -## explicit -github.com/apparentlymart/go-cidr/cidr # github.com/apparentlymart/go-textseg/v13 v13.0.0 ## explicit; go 1.16 github.com/apparentlymart/go-textseg/v13/textseg -# github.com/apparentlymart/go-versions v1.0.0 -## explicit; go 1.14 -github.com/apparentlymart/go-versions/versions -github.com/apparentlymart/go-versions/versions/constraints -# github.com/aws/aws-sdk-go v1.31.9 -## explicit; go 1.11 # github.com/aws/aws-sdk-go-v2 v1.17.5 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2 @@ -102,12 +93,6 @@ github.com/aws/smithy-go/rand github.com/aws/smithy-go/time github.com/aws/smithy-go/transport/http github.com/aws/smithy-go/transport/http/internal/io -# github.com/blang/semver v3.5.1+incompatible -## explicit -github.com/blang/semver -# github.com/bmatcuk/doublestar v1.1.5 -## explicit; go 1.12 -github.com/bmatcuk/doublestar # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew @@ -160,15 +145,9 @@ github.com/google/go-cmp/cmp/internal/value # github.com/google/gofuzz v1.1.0 ## explicit; go 1.12 github.com/google/gofuzz -# github.com/google/uuid v1.1.2 -## explicit -github.com/google/uuid # github.com/hashicorp/errwrap v1.0.0 ## explicit github.com/hashicorp/errwrap -# github.com/hashicorp/go-cleanhttp v0.5.2 -## explicit; go 1.13 -github.com/hashicorp/go-cleanhttp # github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 ## explicit; go 1.12 github.com/hashicorp/go-cty/cty @@ -187,76 +166,20 @@ github.com/hashicorp/go-multierror ## explicit; go 1.17 github.com/hashicorp/go-plugin github.com/hashicorp/go-plugin/internal/plugin -# github.com/hashicorp/go-retryablehttp v0.5.2 -## explicit -github.com/hashicorp/go-retryablehttp # github.com/hashicorp/go-uuid v1.0.3 ## explicit github.com/hashicorp/go-uuid # github.com/hashicorp/go-version v1.6.0 ## explicit github.com/hashicorp/go-version -# github.com/hashicorp/hcl v1.0.0 -## explicit -github.com/hashicorp/hcl -github.com/hashicorp/hcl/hcl/ast -github.com/hashicorp/hcl/hcl/parser -github.com/hashicorp/hcl/hcl/scanner -github.com/hashicorp/hcl/hcl/strconv -github.com/hashicorp/hcl/hcl/token -github.com/hashicorp/hcl/json/parser -github.com/hashicorp/hcl/json/scanner -github.com/hashicorp/hcl/json/token # github.com/hashicorp/hcl/v2 v2.16.1 ## explicit; go 1.18 github.com/hashicorp/hcl/v2 github.com/hashicorp/hcl/v2/ext/customdecode -github.com/hashicorp/hcl/v2/ext/dynblock -github.com/hashicorp/hcl/v2/ext/tryfunc -github.com/hashicorp/hcl/v2/ext/typeexpr -github.com/hashicorp/hcl/v2/gohcl -github.com/hashicorp/hcl/v2/hcldec -github.com/hashicorp/hcl/v2/hclparse github.com/hashicorp/hcl/v2/hclsyntax -github.com/hashicorp/hcl/v2/hclwrite -github.com/hashicorp/hcl/v2/json -# github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590 -## explicit -github.com/hashicorp/hil -github.com/hashicorp/hil/ast -github.com/hashicorp/hil/parser -github.com/hashicorp/hil/scanner # github.com/hashicorp/logutils v1.0.0 ## explicit github.com/hashicorp/logutils -# github.com/hashicorp/terraform v0.13.0-beta1 -## explicit; go 1.14 -github.com/hashicorp/terraform/addrs -github.com/hashicorp/terraform/config -github.com/hashicorp/terraform/configs -github.com/hashicorp/terraform/configs/configschema 
-github.com/hashicorp/terraform/configs/hcl2shim -github.com/hashicorp/terraform/dag -github.com/hashicorp/terraform/experiments -github.com/hashicorp/terraform/helper/didyoumean -github.com/hashicorp/terraform/helper/hilmapstructure -github.com/hashicorp/terraform/helper/logging -github.com/hashicorp/terraform/httpclient -github.com/hashicorp/terraform/instances -github.com/hashicorp/terraform/internal/getproviders -github.com/hashicorp/terraform/lang -github.com/hashicorp/terraform/lang/blocktoattr -github.com/hashicorp/terraform/lang/funcs -github.com/hashicorp/terraform/plans -github.com/hashicorp/terraform/plans/objchange -github.com/hashicorp/terraform/plugin/discovery -github.com/hashicorp/terraform/providers -github.com/hashicorp/terraform/provisioners -github.com/hashicorp/terraform/states -github.com/hashicorp/terraform/states/statefile -github.com/hashicorp/terraform/terraform -github.com/hashicorp/terraform/tfdiags -github.com/hashicorp/terraform/version # github.com/hashicorp/terraform-plugin-go v0.14.3 ## explicit; go 1.18 github.com/hashicorp/terraform-plugin-go/internal/logging @@ -304,8 +227,6 @@ github.com/hashicorp/terraform-registry-address # github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 ## explicit; go 1.12 github.com/hashicorp/terraform-svchost -github.com/hashicorp/terraform-svchost/auth -github.com/hashicorp/terraform-svchost/disco # github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d ## explicit github.com/hashicorp/yamux @@ -332,18 +253,12 @@ github.com/mattn/go-isatty # github.com/mitchellh/copystructure v1.2.0 ## explicit; go 1.15 github.com/mitchellh/copystructure -# github.com/mitchellh/go-homedir v1.1.0 -## explicit -github.com/mitchellh/go-homedir # github.com/mitchellh/go-testing-interface v1.14.1 ## explicit; go 1.14 github.com/mitchellh/go-testing-interface # github.com/mitchellh/go-wordwrap v1.0.0 ## explicit github.com/mitchellh/go-wordwrap -# github.com/mitchellh/hashstructure v1.0.0 -## explicit -github.com/mitchellh/hashstructure # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure @@ -368,11 +283,6 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/spf13/afero v1.9.5 -## explicit; go 1.16 -github.com/spf13/afero -github.com/spf13/afero/internal/common -github.com/spf13/afero/mem # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag @@ -400,33 +310,7 @@ github.com/zclconf/go-cty/cty/function github.com/zclconf/go-cty/cty/function/stdlib github.com/zclconf/go-cty/cty/gocty github.com/zclconf/go-cty/cty/json -github.com/zclconf/go-cty/cty/msgpack github.com/zclconf/go-cty/cty/set -# github.com/zclconf/go-cty-yaml v1.0.2 -## explicit -github.com/zclconf/go-cty-yaml -# golang.org/x/crypto v0.6.0 -## explicit; go 1.17 -golang.org/x/crypto/bcrypt -golang.org/x/crypto/blowfish -golang.org/x/crypto/cast5 -golang.org/x/crypto/chacha20 -golang.org/x/crypto/curve25519 -golang.org/x/crypto/curve25519/internal/field -golang.org/x/crypto/ed25519 -golang.org/x/crypto/internal/alias -golang.org/x/crypto/internal/poly1305 -golang.org/x/crypto/openpgp -golang.org/x/crypto/openpgp/armor -golang.org/x/crypto/openpgp/elgamal -golang.org/x/crypto/openpgp/errors -golang.org/x/crypto/openpgp/packet -golang.org/x/crypto/openpgp/s2k -golang.org/x/crypto/ssh -golang.org/x/crypto/ssh/internal/bcrypt_pbkdf -# golang.org/x/mod v0.9.0 -## explicit; go 1.17 -golang.org/x/mod/sumdb/dirhash # 
golang.org/x/net v0.6.0 ## explicit; go 1.17 golang.org/x/net/context @@ -437,13 +321,12 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 -## explicit; go 1.11 +# golang.org/x/oauth2 v0.4.0 +## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/internal # golang.org/x/sys v0.5.0 ## explicit; go 1.17 -golang.org/x/sys/cpu golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 golang.org/x/sys/unix @@ -453,7 +336,6 @@ golang.org/x/sys/windows golang.org/x/term # golang.org/x/text v0.7.0 ## explicit; go 1.17 -golang.org/x/text/runes golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi @@ -477,10 +359,10 @@ google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 -## explicit; go 1.15 +# google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f +## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.51.0 +# google.golang.org/grpc v1.53.0 ## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes